path: root/playbooks
Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/README.md | 2
-rw-r--r--  playbooks/adhoc/uninstall.yml | 2
-rw-r--r--  playbooks/aws/README.md | 256
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 19
-rw-r--r--  playbooks/aws/openshift-cluster/add_nodes.yml | 35
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 159
-rw-r--r--  playbooks/aws/openshift-cluster/build_node_group.yml | 47
-rw-r--r--  playbooks/aws/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 37
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 74
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 54
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 23
-rw-r--r--  playbooks/aws/openshift-cluster/provision.yml | 158
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml | 16
-rw-r--r--  playbooks/aws/openshift-cluster/provision_nodes.yml | 49
-rw-r--r--  playbooks/aws/openshift-cluster/provisioning_vars.example.yml | 26
-rw-r--r--  playbooks/aws/openshift-cluster/scaleup.yml | 32
-rw-r--r--  playbooks/aws/openshift-cluster/service.yml | 31
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 188
-rw-r--r--  playbooks/aws/openshift-cluster/templates/user_data.j2 | 22
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 77
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 34
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 156
-rw-r--r--  playbooks/byo/openshift-checks/README.md | 48
-rw-r--r--  playbooks/byo/openshift-checks/adhoc.yml | 27
-rw-r--r--  playbooks/byo/openshift-checks/health.yml | 3
-rw-r--r--  playbooks/byo/openshift-checks/pre-install.yml | 3
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-logging.yml | 3
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-prometheus.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-provisioners.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/service-catalog.yml | 3
-rw-r--r--  playbooks/byo/openshift-etcd/config.yml | 8
-rw-r--r--  playbooks/byo/openshift-etcd/migrate.yml | 6
-rw-r--r--  playbooks/byo/openshift-etcd/restart.yml | 4
-rw-r--r--  playbooks/byo/openshift-etcd/scaleup.yml | 6
-rw-r--r--  playbooks/byo/openshift-master/additional_config.yml | 6
-rw-r--r--  playbooks/byo/openshift-master/config.yml | 6
-rw-r--r--  playbooks/byo/openshift-master/restart.yml | 4
-rw-r--r--  playbooks/byo/openshift-master/scaleup.yml | 5
-rw-r--r--  playbooks/byo/openshift-node/config.yml | 6
-rw-r--r--  playbooks/byo/openshift-node/restart.yml | 4
-rw-r--r--  playbooks/byo/openshift-node/scaleup.yml | 9
-rw-r--r--  playbooks/byo/vagrant.yml | 4
-rw-r--r--  playbooks/common/README.md | 7
-rw-r--r--  playbooks/common/openshift-checks/adhoc.yml | 12
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 6
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 42
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 23
-rw-r--r--  playbooks/common/openshift-cluster/initialize_firewall.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/sanity_checks.yml | 47
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/restart.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml (renamed from playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 11
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 1
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 18
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 8
-rw-r--r--  playbooks/common/openshift-etcd/service.yml | 23
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 1
-rw-r--r--  playbooks/common/openshift-loadbalancer/service.yml | 23
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml (renamed from playbooks/common/openshift-cluster/additional_config.yml) | 6
-rw-r--r--  playbooks/common/openshift-master/config.yml | 21
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 37
-rw-r--r--  playbooks/common/openshift-master/service.yml | 23
-rw-r--r--  playbooks/common/openshift-nfs/service.yml | 21
-rw-r--r--  playbooks/common/openshift-node/config.yml | 26
-rw-r--r--  playbooks/common/openshift-node/scaleup.yml | 50
-rw-r--r--  playbooks/common/openshift-node/service.yml | 26
-rw-r--r--  playbooks/gce/README.md | 4
-rw-r--r--  playbooks/gce/openshift-cluster/add_nodes.yml | 43
-rw-r--r--  playbooks/gce/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 36
l---------  playbooks/gce/openshift-cluster/filter_plugins | 1
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 67
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 23
l---------  playbooks/gce/openshift-cluster/lookup_plugins | 1
l---------  playbooks/gce/openshift-cluster/roles | 1
-rw-r--r--  playbooks/gce/openshift-cluster/service.yml | 29
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 65
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 58
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 34
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml | 18
-rw-r--r--  playbooks/libvirt/README.md | 4
-rw-r--r--  playbooks/libvirt/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 39
l---------  playbooks/libvirt/openshift-cluster/filter_plugins | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 57
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 23
l---------  playbooks/libvirt/openshift-cluster/lookup_plugins | 1
l---------  playbooks/libvirt/openshift-cluster/roles | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/service.yml | 34
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 11
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 30
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 142
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/domain.xml | 65
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/meta-data | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/network.xml | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/storage-pool.xml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data | 43
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 70
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 37
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 40
-rw-r--r--  playbooks/openstack/README.md | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 33
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 508
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml | 152
l---------  playbooks/openstack/openshift-cluster/filter_plugins | 1
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 191
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 24
l---------  playbooks/openstack/openshift-cluster/lookup_plugins | 1
l---------  playbooks/openstack/openshift-cluster/roles | 1
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 49
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 34
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 38
146 files changed, 637 insertions(+), 3747 deletions(-)
diff --git a/playbooks/README.md b/playbooks/README.md
index 5857a9f59..290d4c082 100644
--- a/playbooks/README.md
+++ b/playbooks/README.md
@@ -12,8 +12,6 @@ And:
- [`adhoc`](adhoc) is a generic home for playbooks and tasks that are community
supported and not officially maintained.
-- [`aws`](aws), [`gce`](gce), [`libvirt`](libvirt) and [`openstack`](openstack)
- are related to the [`bin/cluster`](../bin) tool and its usage is deprecated.
Refer to the `README.md` file in each playbook directory for more information
about them.
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 58b3a7835..5072d10fa 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -35,11 +35,9 @@
- /etc/dnsmasq.d/origin-upstream-dns.conf
- /etc/dnsmasq.d/openshift-ansible.conf
- /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
- when: openshift_use_dnsmasq | default(true) | bool
- service:
name: NetworkManager
state: restarted
- when: openshift_use_dnsmasq | default(true) | bool
- name: Stop services
service: name={{ item }} state=stopped
with_items:
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 410d98a9c..c2da4b632 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -1,9 +1,5 @@
# AWS playbooks
-Parts of this playbook directory are meant to be driven by [`bin/cluster`](../../bin),
-which is community supported and use is considered **deprecated**.
-
-
## Provisioning
With recent desire for provisioning from customers and developers alike, the AWS
@@ -36,132 +32,52 @@ Before any provisioning may occur, AWS account credentials must be present in th
### Let's Provision!
The newly added playbooks are the following:
-- build_ami.yml
-- provision.yml
+- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid AMI with access to repositories that contain OpenShift RPMs.
+- provision.yml - Creates the VPC, ELBs, security groups, launch configs, auto scaling groups, etc.
+- install.yml - Calls the openshift-ansible installer on the newly created instances.
+- provision_nodes.yml - Creates the infra and compute node scale groups.
+- accept.yml - Accepts infra and compute nodes into the cluster.
+- provision_install.yml - Runs all of the above playbooks in sequence (provision, install, provision_nodes, and accept).
-The current expected work flow should be to provide the `vars.yml` file with the
-desired settings for cluster instances. These settings are AWS specific and should
-be tailored to the consumer's AWS custom account settings.
+The expected workflow is to provide an AMI with access to OpenShift repositories; a repository should be specified in the `openshift_additional_repos` parameter of the inventory file. Next, supply a minimal set of values in the `provisioning_vars.yml` file to configure the desired settings for cluster instances. These settings are AWS specific and should be tailored to the consumer's AWS account.
```yaml
-clusterid: mycluster
-region: us-east-1
-
-provision:
- clusterid: "{{ clusterid }}"
- region: "{{ region }}"
-
- build:
- base_image: ami-bdd5d6ab # base image for AMI to build from
- # when creating an encrypted AMI please specify use_encryption
- use_encryption: False
-
- yum_repositories: # this is an example repository but it requires sslclient info. Use a valid yum repository for openshift rpms
- - name: openshift-repo
- file: openshift-repo
- description: OpenShift Builds
- baseurl: https://mirror.openshift.com/enterprise/online-int/latest/x86_64/os/
- enabled: yes
- gpgcheck: no
- sslverify: no
- # client cert and key required for this repository
- sslclientcert: "/var/lib/yum/client-cert.pem"
- sslclientkey: "/var/lib/yum/client-key.pem"
- gpgkey: "https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted"
-
- # for s3 registry backend
- openshift_registry_s3: True
-
- # if using custom certificates these are required for the ELB
- iam_cert_ca:
- name: test_openshift
- cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
- key_path: '/path/to/wildcard.<clusterid>.example.com.key'
- chain_path: '/path/to/cert.ca.crt'
-
- instance_users:
- - key_name: myuser_key
- username: myuser
- pub_key: |
- ssh-rsa aaa<place public ssh key here>aaaaa user@<clusterid>
-
- node_group_config:
- tags:
- clusterid: "{{ clusterid }}"
- environment: stg
- ssh_key_name: myuser_key # name of the ssh key from above
-
- # configure master settings here
- master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- # Set the following number to be the same for masters.
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-...
- vpc:
- # name: mycluster # If missing; will default to clusterid
- cidr: 172.31.0.0/16
- subnets:
- us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
- - cidr: 172.31.48.0/20
- az: "us-east-1c"
- - cidr: 172.31.32.0/20
- az: "us-east-1e"
- - cidr: 172.31.16.0/20
- az: "us-east-1a"
+---
+openshift_node_bootstrap: True
-```
+# specify a clusterid
+# openshift_aws_clusterid: default
-Repeat the following setup for the infra and compute node groups. This most likely
- will not need editing but if further customization is required these parameters
- can be updated.
+# specify a region
+# openshift_aws_region: us-east-1
-#### Step 1
+# must specify a base_ami when building an AMI
+# openshift_aws_base_ami: # base image for AMI to build from
+# specify when using a custom AMI
+# openshift_aws_ami:
-Once the vars.yml file has been updated with the correct settings for the desired AWS account then we are ready to build an AMI.
-
-```
-$ ansible-playbook build_ami.yml
-```
+# when creating an encrypted AMI please specify use_encryption
+# openshift_aws_ami_encrypt: False
-1. This script will build a VPC. Default name will be clusterid if not specified.
-2. Create an ssh key required for the instance.
-3. Create an instance.
-4. Run some setup roles to ensure packages and services are correctly configured.
-5. Create the AMI.
-6. If encryption is desired
- - A KMS key is created with the name of $clusterid
- - An encrypted AMI will be produced with $clusterid KMS key
-7. Terminate the instance used to configure the AMI.
-
-#### Step 2
+# custom certificates are required for the ELB
+# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt'
+# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key'
+# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt'
-Now that we have created an AMI for our Openshift installation, that AMI id needs to be placed in the `vars.yml` file. To do so update the following fields (The AMI can be captured from the output of the previous step or found in the ec2 console under AMIs):
+# This is required for any ec2 instances
+# openshift_aws_ssh_key_name: myuser_key
+# This will ensure these users are created
+#openshift_aws_users:
+#- key_name: myuser_key
+# username: myuser
+# pub_key: |
+# ssh-rsa AAAA
```
- # when creating an encrypted AMI please specify use_encryption
- use_encryption: False # defaults to false
-```
-
-**Note**: If using encryption, specify with `use_encryption: True`. This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false. The AMI id will be fetched and used according to its most recent creation date.
-#### Step 3
+If customization is required for the instances, scale groups, or any other configurable option, please see [`openshift_aws/defaults/main.yml`](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in `provisioning_vars.yml`, the `inventory`, or `group_vars`.
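+As an illustrative sketch, an override is just an ordinary Ansible variable in `provisioning_vars.yml`; for example, the ssh key name used for ec2 instances (this variable also appears in `provisioning_vars.example.yml`):
+
+```yaml
+# Override the default ssh key used when launching ec2 instances.
+# Any other default from roles/openshift_aws/defaults/main.yml can be
+# overridden the same way.
+openshift_aws_ssh_key_name: myuser_key
+```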
-Create an openshift-ansible inventory file to use for a byo installation. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
+In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
```ini
[OSEv3:children]
@@ -175,10 +91,22 @@ nodes
etcd
[OSEv3:vars]
-# cluster specific settings maybe be placed here
+################################################################################
+# Ensure these variables are set for bootstrap
+################################################################################
+# openshift_deployment_type is required for installation
+openshift_deployment_type=origin
+openshift_master_bootstrap_enabled=True
+
openshift_hosted_router_wait=False
openshift_hosted_registry_wait=False
+# Repository for installation
+openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}]
+
+################################################################################
+# cluster specific settings may be placed here
+
[masters]
[etcd]
@@ -188,43 +116,94 @@ openshift_hosted_registry_wait=False
There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
-#### Step 4
+#### Step 1
+
+Once the `inventory` and the `provisioning_vars.yml` files have been updated with the correct settings for the desired AWS account, we are ready to build an AMI.
+
+```
+$ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml
+```
+
+1. This script will build a VPC. Default name will be clusterid if not specified.
+2. Create an ssh key required for the instance.
+3. Create a security group.
+4. Create an instance using the key from step 2 or a specified key.
+5. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
+6. Create the AMI.
+7. If encryption is desired
+ - A KMS key is created with the name of $clusterid
+ - An encrypted AMI will be produced with $clusterid KMS key
+8. Terminate the instance used to configure the AMI.
+
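+Before moving on, the newly created image can be confirmed from the AWS CLI. A minimal sketch, assuming the CLI is configured for the same account and the default AMI name prefix from `build_ami.yml` (`openshift-gi-`):
+
+```
+$ aws ec2 describe-images --owners self \
+    --filters "Name=name,Values=openshift-gi-*" \
+    --query 'Images[].[ImageId,Name,CreationDate]' --output table
+```
+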
+More AMI-specific options can be found in [`openshift_aws/defaults/main.yml`](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify `openshift_aws_ami_encrypt`:
+```
+# openshift_aws_ami_encrypt: True # defaults to false
+```
+
+**Note**: This takes the recently created AMI and encrypts it for later use. If encryption is not desired then leave the value set to false (the default). The AMI id will be fetched and used according to its most recent creation date.
+
+#### Step 2
+
+Now that we have created an AMI for our OpenShift installation, there are two ways to use the AMI.
-We are ready to create the master instances and install Openshift.
+1. In the default behavior, the most recently created AMI id will be found and used.
+2. The `openshift_aws_ami` variable can be specified to override this behavior and use a specific custom AMI.
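+A minimal sketch of pinning a specific image in `provisioning_vars.yml` (the AMI id below is a placeholder, not a real image):
+
+```yaml
+# Hypothetical: skip the most-recent lookup and use this exact image instead.
+openshift_aws_ami: ami-0123456789abcdef0
+```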
+We are now ready to provision and install the cluster. This can be accomplished by running all of the following steps at once or one-by-one. The all-in-one can be invoked like this:
```
-$ ansible-playbook -i <inventory from step 3> provision.yml
+$ ansible-playbook -i inventory.yml provision_install.yml -e @provisioning_vars.yml
+```
+
+If this is the first time running through this process, please attempt the following steps one-by-one and ensure the setup works correctly.
+
+#### Step 3
+
+We are ready to create the master instances.
+
+```
+$ ansible-playbook provision.yml -e @provisioning_vars.yml
```
This playbook runs through the following steps:
-1. Ensures a VPC is created
-2. Ensures a SSH key exists
-3. Creates an s3 bucket for the registry named $clusterid
-4. Create master security groups
-5. Create a master launch config
-6. Create the master auto scaling groups
-7. If certificates are desired for ELB, they will be uploaded
-8. Create internal and external master ELBs
-9. Add newly created masters to the correct groups
-10. Set a couple of important facts for the masters
-11. Run the [`byo`](../../common/openshift-cluster/config.yml)
+1. Ensures a VPC is created.
+2. Ensures an SSH key exists.
+3. Creates an s3 bucket for the registry named $clusterid-docker-registry.
+4. Create master security groups.
+5. Create a master launch config.
+6. Create the master auto scaling groups.
+7. If certificates are desired for ELB, they will be uploaded.
+8. Create internal and external master ELBs.
+9. Add newly created masters to the correct groups.
+10. Set a couple of important facts for the masters.
+
+At this point we have successfully created the infrastructure including the master nodes.
-At this point we have created a successful cluster with only the master nodes.
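+To sanity-check the provisioned masters, the tagged instances can be listed with the AWS CLI. A minimal sketch, assuming the CLI is configured for the same account and the default clusterid of `default`:
+
+```
+$ aws ec2 describe-instances \
+    --filters "Name=tag:clusterid,Values=default" "Name=tag:host-type,Values=master" \
+    --query 'Reservations[].Instances[].[InstanceId,PublicIpAddress,State.Name]' --output table
+```
+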
+#### Step 4
+
+Now it is time to install OpenShift using the openshift-ansible installer. This can be achieved by running the following playbook:
+
+```
+$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml
+```
+This playbook accomplishes the following:
+1. Builds a dynamic inventory file by querying AWS.
+2. Runs the [`byo`](../../common/openshift-cluster/config.yml) cluster configuration playbook.
+Once this playbook completes, the cluster masters should be installed and configured.
#### Step 5
-Now that we have a cluster deployed it might be more interesting to create some node types. This can be done easily with the following playbook:
+Now that we have a cluster deployed, the next step is to create some node types. This can be done easily with the following playbook:
```
-$ ansible-playbook provision_nodes.yml
+$ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml
```
Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator.
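+The approval is automated in Step 6 below. To inspect the pending requests by hand first, a rough sketch on a master (this assumes node registration flows through the certificate signing request API and that your `oc` client provides the `certificate` subcommand):
+
+```
+$ oc get csr
+$ oc adm certificate approve <csr-name>
+```
+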
#### Step 6
-The registration of our nodes can be automated by running the following script `accept.yml`. This script can handle the registration in a few different ways.
+To facilitate the node registration process, nodes may be registered by running the `accept.yml` playbook. It can handle registration in a few different ways.
- approve_all - **Note**: this option is for development and test environments. Security is bypassed
- nodes - A list of node names that will be accepted into the cluster
@@ -234,10 +213,11 @@ The registration of our nodes can be automated by running the following script `
nodes: < list of nodes here >
timeout: 0
```
+
Once the desired accept method is chosen, run the following playbook `accept.yml`:
1. Run the following playbook.
```
-$ ansible-playbook accept.yml
+$ ansible-playbook accept.yml -e @provisioning_vars.yml
```
Login to a master and run the following command:
@@ -264,6 +244,6 @@ ip-172-31-49-148.ec2.internal Ready 1h v1.6.1+5115d
At this point your cluster should be ready for workloads. Proceed to deploy applications on your cluster.
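+As a quick smoke test, a hypothetical throwaway application can be deployed (the image and name below are placeholders, not part of this repository):
+
+```
+$ oc new-app --name=hello openshift/hello-openshift
+$ oc get pods
+```
+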
-### Still to compute
+### Still to come
More provisioning enhancements are on the way, including additional playbooks that extend these capabilities.
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index d43c84205..ffc367f9f 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -1,12 +1,17 @@
+#!/usr/bin/ansible-playbook
---
- name: Setup the vpc and the master node group
- #hosts: oo_first_master
hosts: localhost
remote_user: root
gather_facts: no
tasks:
- - name: get provisioning vars
- include_vars: vars.yml
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- name: bring lib_openshift into scope
include_role:
@@ -14,9 +19,9 @@
- name: fetch masters
ec2_remote_facts:
- region: "{{ provision.region }}"
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
- "tag:clusterid": "{{ provision.clusterid }}"
+ "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
"tag:host-type": master
instance-state-name: running
register: mastersout
@@ -26,9 +31,9 @@
- name: fetch new node instances
ec2_remote_facts:
- region: "{{ provision.region }}"
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
- "tag:clusterid": "{{ provision.clusterid }}"
+ "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
"tag:host-type": node
instance-state-name: running
register: instancesout
diff --git a/playbooks/aws/openshift-cluster/add_nodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml
deleted file mode 100644
index 0e8eb90c1..000000000
--- a/playbooks/aws/openshift-cluster/add_nodes.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- vars:
- oo_extend_env: True
- tasks:
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
-- include: scaleup.yml
-- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index fa708ffa1..d3c0057b5 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -3,132 +3,83 @@
connection: local
gather_facts: no
tasks:
- - name: get the necessary vars for ami building
- include_vars: vars.yml
+ - name: Require openshift_aws_base_ami
+ fail:
+ msg: "A base AMI is required for AMI building. Please ensure `openshift_aws_base_ami` is defined."
+ when: openshift_aws_base_ami is undefined
- - name: create a vpc with the name <clusterid>
- include_role:
- name: openshift_aws_vpc
- vars:
- r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}"
- r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}"
- r_openshift_aws_vpc_region: "{{ provision.region }}"
- r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}"
- r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}"
+ - name: "Alert user to variables needed and their values - {{ item.name }}"
+ debug:
+ msg: "{{ item.msg }}"
+ with_items:
+ - name: openshift_aws_clusterid
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+ - name: openshift_aws_region
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- - name: create aws ssh keypair
+ - name: create an instance and prepare for ami
include_role:
- name: openshift_aws_ssh_keys
+ name: openshift_aws
+ tasks_from: build_ami.yml
vars:
- r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}"
- r_openshift_aws_ssh_keys_region: "{{ provision.region }}"
+ openshift_aws_node_group_type: compute
- - name: fetch the default subnet id
- ec2_vpc_subnet_facts:
- region: "{{ provision.region }}"
+ - name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
- "tag:Name": "{{ provision.vpc.subnets[provision.region][0].az }}"
- register: subnetout
-
- - name: create instance for ami creation
- ec2:
- assign_public_ip: yes
- region: "{{ provision.region }}"
- key_name: "{{ provision.node_group_config.ssh_key_name }}"
- group: "{{ provision.clusterid }}"
- instance_type: m4.xlarge
- vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
- image: "{{ provision.build.base_image }}"
- volumes:
- - device_name: /dev/sdb
- volume_type: gp2
- volume_size: 100
- delete_on_termination: true
- wait: yes
- exact_count: 1
- count_tag:
- Name: ami_base
- instance_tags:
- Name: ami_base
- register: amibase
+ "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}"
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
- name: wait for ssh to become available
wait_for:
port: 22
- host: "{{ amibase.tagged_instances.0.public_ip }}"
+ host: "{{ instancesout.instances[0].public_ip_address }}"
timeout: 300
search_regex: OpenSSH
- - name: add host to group
+ - name: add host to nodes
add_host:
- name: "{{ amibase.tagged_instances.0.public_dns_name }}"
- groups: amibase
+ groups: nodes
+ name: "{{ instancesout.instances[0].public_dns_name }}"
+
+ - name: set the user to perform installation
+ set_fact:
+ ansible_ssh_user: root
+
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
-- hosts: amibase
+- name: run the std_include
+ include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/initialize_facts.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/initialize_openshift_repos.yml
+
+- hosts: nodes
remote_user: root
tasks:
- - name: included required variables
- include_vars: vars.yml
+ - set_fact:
+ openshift_node_bootstrap: True
- name: run openshift image preparation
include_role:
- name: openshift_ami_prep
- vars:
- r_openshift_ami_prep_yum_repositories: "{{ provision.build.yum_repositories }}"
- r_openshift_ami_prep_node: atomic-openshift-node
- r_openshift_ami_prep_master: atomic-openshift-master
+ name: openshift_node
- hosts: localhost
connection: local
become: no
tasks:
- - name: bundle ami
- ec2_ami:
- instance_id: "{{ amibase.tagged_instances.0.id }}"
- region: "{{ provision.region }}"
- state: present
- description: "This was provisioned {{ ansible_date_time.iso8601 }}"
- name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
- wait: yes
- register: amioutput
-
- - debug: var=amioutput
-
- - when: provision.build.use_encryption | default(False)
- block:
- - name: setup kms key for encryption
- include_role:
- name: openshift_aws_iam_kms
- vars:
- r_openshift_aws_iam_kms_region: "{{ provision.region }}"
- r_openshift_aws_iam_kms_alias: "alias/{{ provision.clusterid }}_kms"
-
- - name: augment the encrypted ami tags with source-ami
- set_fact:
- source_tag:
- source-ami: "{{ amioutput.image_id }}"
-
- - name: copy the ami for encrypted disks
- include_role:
- name: openshift_aws_ami_copy
- vars:
- r_openshift_aws_ami_copy_region: "{{ provision.region }}"
- r_openshift_aws_ami_copy_name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}-encrypted"
- r_openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
- r_openshift_aws_ami_copy_kms_alias: "alias/{{ provision.clusterid }}_kms"
- r_openshift_aws_ami_copy_tags: "{{ source_tag | combine(provision.build.openshift_ami_tags) }}"
- r_openshift_aws_ami_copy_encrypt: "{{ provision.build.use_encryption }}"
- # this option currently fails due to boto waiters
- # when supported this need to be reapplied
- #r_openshift_aws_ami_copy_wait: True
-
- - name: Display newly created encrypted ami id
- debug:
- msg: "{{ r_openshift_aws_ami_copy_retval_custom_ami }}"
-
- - name: terminate temporary instance
- ec2:
- state: absent
- region: "{{ provision.region }}"
- instance_ids: "{{ amibase.tagged_instances.0.id }}"
+ - name: seal the ami
+ include_role:
+ name: openshift_aws
+ tasks_from: seal_ami.yml
+ vars:
+ openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
diff --git a/playbooks/aws/openshift-cluster/build_node_group.yml b/playbooks/aws/openshift-cluster/build_node_group.yml
deleted file mode 100644
index 3ef492238..000000000
--- a/playbooks/aws/openshift-cluster/build_node_group.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: fetch recently created AMI
- ec2_ami_find:
- region: "{{ provision.region }}"
- sort: creationDate
- sort_order: descending
- name: "{{ provision.build.ami_name }}*"
- ami_tags: "{{ provision.build.openshift_ami_tags }}"
- #no_result_action: fail
- register: amiout
-
-- block:
- - name: "Create {{ openshift_build_node_type }} sgs"
- include_role:
- name: openshift_aws_sg
- vars:
- r_openshift_aws_sg_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_sg_region: "{{ provision.region }}"
- r_openshift_aws_sg_type: "{{ openshift_build_node_type }}"
-
- - name: "generate a launch config name for {{ openshift_build_node_type }}"
- set_fact:
- launch_config_name: "{{ provision.clusterid }}-{{ openshift_build_node_type }}-{{ ansible_date_time.epoch }}"
-
- - name: create "{{ openshift_build_node_type }} launch config"
- include_role:
- name: openshift_aws_launch_config
- vars:
- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
- r_openshift_aws_launch_config_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_launch_config_region: "{{ provision.region }}"
- r_openshift_aws_launch_config: "{{ provision.node_group_config }}"
- r_openshift_aws_launch_config_type: "{{ openshift_build_node_type }}"
- r_openshift_aws_launch_config_custom_image: "{{ '' if 'results' not in amiout else amiout.results[0].ami_id }}"
- r_openshift_aws_launch_config_bootstrap_token: "{{ (local_bootstrap['content'] |b64decode) if local_bootstrap is defined else '' }}"
-
- - name: "create {{ openshift_build_node_type }} node groups"
- include_role:
- name: openshift_aws_node_group
- vars:
- r_openshift_aws_node_group_name: "{{ provision.clusterid }} openshift {{ openshift_build_node_type }}"
- r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
- r_openshift_aws_node_group_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_node_group_region: "{{ provision.region }}"
- r_openshift_aws_node_group_config: "{{ provision.node_group_config }}"
- r_openshift_aws_node_group_type: "{{ openshift_build_node_type }}"
- r_openshift_aws_node_group_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}"
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index c2f4dfedc..000000000
--- a/playbooks/aws/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
- | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
deleted file mode 100644
index 821a0f30e..000000000
--- a/playbooks/aws/openshift-cluster/config.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_public_hostname: "{{ ec2_ip_address }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_node_labels:
- region: "{{ deployment_vars[deployment_type].region }}"
- type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }}"
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
new file mode 100644
index 000000000..86d58a68e
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -0,0 +1,74 @@
+---
+- name: Setup the vpc and the master node group
+ hosts: localhost
+ tasks:
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+ - name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
+ "tag:host-type": master
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+ - name: add new master to masters group
+ add_host:
+ groups: "masters,etcd,nodes"
+ name: "{{ item.public_ip_address }}"
+ hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}"
+ with_items: "{{ instancesout.instances }}"
+
+ - name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
+
+- name: set the master facts for hostname to elb
+ hosts: masters
+ gather_facts: no
+ remote_user: root
+ tasks:
+ - name: fetch elbs
+ ec2_elb_facts:
+ region: "{{ openshift_aws_region | default('us-east-1') }}"
+ names:
+ - "{{ item }}"
+ with_items:
+ - "{{ openshift_aws_clusterid | default('default') }}-master-external"
+ - "{{ openshift_aws_clusterid | default('default') }}-master-internal"
+ delegate_to: localhost
+ register: elbs
+
+ - debug: var=elbs
+
+ - name: set fact
+ set_fact:
+ openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+ osm_custom_cors_origins:
+ - "{{ elbs.results[1].elbs[0].dns_name }}"
+ - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+ - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+ with_items: "{{ groups['masters'] }}"
+
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/std_include.yml
+
+- name: run the config
+ include: ../../common/openshift-cluster/config.yml
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
deleted file mode 100644
index 3edace493..000000000
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ etcd_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - add_host:
- name: "{{ master_names.0 }}"
- groups: service_master
- when: master_names is defined and master_names.0 is defined
-
-- include: update.yml
-- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
deleted file mode 100644
index ed8aac398..000000000
--- a/playbooks/aws/openshift-cluster/list.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=tag_clusterid_{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}"
- oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}"
- with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index dfbf61cc7..db7afac6f 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -2,156 +2,16 @@
- name: Setup the vpc and the master node group
hosts: localhost
tasks:
- - name: get provisioning vars
- include_vars: vars.yml
- - name: create default vpc
- include_role:
- name: openshift_aws_vpc
- vars:
- r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}"
- r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}"
- r_openshift_aws_vpc_region: "{{ provision.region }}"
- r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}"
- r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}"
-
- - name: create aws ssh keypair
- include_role:
- name: openshift_aws_ssh_keys
- vars:
- r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}"
- r_openshift_aws_ssh_keys_region: "{{ provision.region }}"
-
- - when: provision.openshift_registry_s3 | default(false)
- name: create s3 bucket for registry
- include_role:
- name: openshift_aws_s3
- vars:
- r_openshift_aws_s3_clusterid: "{{ provision.clusterid }}-docker-registry"
- r_openshift_aws_s3_region: "{{ provision.region }}"
- r_openshift_aws_s3_mode: create
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
- - name: include scale group creation for master
- include: build_node_group.yml
- vars:
- openshift_build_node_type: master
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- - name: fetch new master instances
- ec2_remote_facts:
- region: "{{ provision.region }}"
- filters:
- "tag:clusterid": "{{ provision.clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
-
- - name: bring iam_cert23 into scope
- include_role:
- name: lib_utils
-
- - name: upload certificates to AWS IAM
- iam_cert23:
- state: present
- name: "{{ provision.clusterid }}-master-external"
- cert: "{{ provision.iam_cert_ca.cert_path }}"
- key: "{{ provision.iam_cert_ca.key_path }}"
- cert_chain: "{{ provision.iam_cert_ca.chain_path | default(omit) }}"
- register: elb_cert_chain
- failed_when:
- - "'failed' in elb_cert_chain"
- - elb_cert_chain.failed
- - "'msg' in elb_cert_chain"
- - "'already exists' not in elb_cert_chain.msg"
- when: provision.iam_cert_ca is defined
-
- - debug: var=elb_cert_chain
-
- - name: create our master external and internal load balancers
+ - name: create default vpc
include_role:
- name: openshift_aws_elb
- vars:
- r_openshift_aws_elb_clusterid: "{{ provision.clusterid }}"
- r_openshift_aws_elb_region: "{{ provision.region }}"
- r_openshift_aws_elb_instance_filter:
- "tag:clusterid": "{{ provision.clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- r_openshift_aws_elb_type: master
- r_openshift_aws_elb_direction: "{{ elb_item }}"
- r_openshift_aws_elb_idle_timout: 400
- r_openshift_aws_elb_scheme: internet-facing
- r_openshift_aws_elb_security_groups:
- - "{{ provision.clusterid }}"
- - "{{ provision.clusterid }}_master"
- r_openshift_aws_elb_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}"
- r_openshift_aws_elb_name: "{{ provision.clusterid }}-master-{{ elb_item }}"
- r_openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
- with_items:
- - internal
- - external
- loop_control:
- loop_var: elb_item
-
- - name: add new master to masters group
- add_host:
- groups: "masters,etcd,nodes"
- name: "{{ item.public_ip_address }}"
- hostname: "{{ provision.clusterid }}-master-{{ item.id[:-5] }}"
- with_items: "{{ instancesout.instances }}"
-
- - name: set facts for group normalization
- set_fact:
- cluster_id: "{{ provision.clusterid }}"
- cluster_env: "{{ provision.node_group_config.tags.environment | default('dev') }}"
-
- - name: wait for ssh to become available
- wait_for:
- port: 22
- host: "{{ item.public_ip_address }}"
- timeout: 300
- search_regex: OpenSSH
- with_items: "{{ instancesout.instances }}"
-
-
-- name: set the master facts for hostname to elb
- hosts: masters
- gather_facts: no
- remote_user: root
- tasks:
- - name: include vars
- include_vars: vars.yml
-
- - name: fetch elbs
- ec2_elb_facts:
- region: "{{ provision.region }}"
- names:
- - "{{ item }}"
- with_items:
- - "{{ provision.clusterid }}-master-external"
- - "{{ provision.clusterid }}-master-internal"
- delegate_to: localhost
- register: elbs
-
- - debug: var=elbs
-
- - name: set fact
- set_fact:
- openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
- osm_custom_cors_origins:
- - "{{ elbs.results[1].elbs[0].dns_name }}"
- - "console.{{ provision.clusterid }}.openshift.com"
- - "api.{{ provision.clusterid }}.openshift.com"
- with_items: "{{ groups['masters'] }}"
-
-- name: normalize groups
- include: ../../byo/openshift-cluster/initialize_groups.yml
-
-- name: run the std_include
- include: ../../common/openshift-cluster/std_include.yml
-
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
+ name: openshift_aws
+ tasks_from: provision.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
new file mode 100644
index 000000000..e787deced
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -0,0 +1,16 @@
+---
+# Once an AMI is built then this script is used for
+# the one stop shop to provision and install a cluster
+# this playbook is run with the following parameters:
+# ansible-playbook -i openshift-ansible-inventory provision_install.yml
+- name: Include the provision.yml playbook to create cluster
+ include: provision.yml
+
+- name: Include the install.yml playbook to install cluster
+ include: install.yml
+
+- name: Include the provision_nodes.yml playbook to provision the cluster nodes
+ include: provision_nodes.yml
+
+- name: Include the accept.yml playbook to accept nodes into the cluster
+ include: accept.yml
diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml
index 5428fb307..44c686e08 100644
--- a/playbooks/aws/openshift-cluster/provision_nodes.yml
+++ b/playbooks/aws/openshift-cluster/provision_nodes.yml
@@ -1,47 +1,18 @@
---
-# Get bootstrap config token
-# bootstrap should be created on first master
-# need to fetch it and shove it into cloud data
- name: create the node scale groups
hosts: localhost
connection: local
gather_facts: yes
tasks:
- - name: get provisioning vars
- include_vars: vars.yml
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
- - name: fetch master instances
- ec2_remote_facts:
- region: "{{ provision.region }}"
- filters:
- "tag:clusterid": "{{ provision.clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- - name: slurp down the bootstrap.kubeconfig
- slurp:
- src: /etc/origin/master/bootstrap.kubeconfig
- delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
- remote_user: root
- register: bootstrap
-
- - name: set_fact on localhost for kubeconfig
- set_fact:
- local_bootstrap: "{{ bootstrap }}"
- launch_config_name:
- infra: "infra-{{ ansible_date_time.epoch }}"
- compute: "compute-{{ ansible_date_time.epoch }}"
-
- - name: include build node group
- include: build_node_group.yml
- vars:
- openshift_build_node_type: infra
-
- - name: include build node group
- include: build_node_group.yml
- vars:
- openshift_build_node_type: compute
+ - name: create the node groups
+ include_role:
+ name: openshift_aws
+ tasks_from: provision_nodes.yml
diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
new file mode 100644
index 000000000..5a30ad3a5
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
@@ -0,0 +1,26 @@
+---
+openshift_node_bootstrap: True
+
+# specify a clusterid
+#openshift_aws_clusterid: default
+
+# must specify a base_ami when building an AMI
+#openshift_aws_base_ami:
+
+# when creating an encrypted AMI please specify use_encryption
+#openshift_aws_ami_encrypt: False
+
+# custom certificates are required for the ELB
+#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
+#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key'
+#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
+
+# This is required for any ec2 instances
+#openshift_aws_ssh_key_name: myuser_key
+
+# This will ensure these users are created
+#openshift_aws_users:
+#- key_name: myuser_key
+# username: myuser
+# pub_key: |
+# ssh-rsa AAAA
diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml
deleted file mode 100644
index 6fa9142a0..000000000
--- a/playbooks/aws/openshift-cluster/scaleup.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-
-- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars_files:
- - vars.yml
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ groups.nodes_to_add }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: ../../common/openshift-cluster/scaleup.yml
- vars_files:
- - ../../aws/openshift-cluster/vars.yml
- - ../../aws/openshift-cluster/cluster_hosts.yml
- vars:
- g_new_node_hosts: "{{ groups.nodes_to_add }}"
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml
deleted file mode 100644
index f7f4812bb..000000000
--- a/playbooks/aws/openshift-cluster/service.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: Call same systemctl command for openshift on all instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- tasks:
- - fail: msg="cluster_id is required to be injected in this playbook"
- when: cluster_id is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- groups: g_service_masters
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ master_hosts | default([]) }}"
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ node_hosts | default([]) }}"
-
-- include: ../../common/openshift-node/service.yml
-- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
deleted file mode 100644
index 608512b79..000000000
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ /dev/null
@@ -1,188 +0,0 @@
----
-- set_fact:
- created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
- docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
- cluster: "{{ cluster_id }}"
- env: "{{ cluster_env }}"
- host_type: "{{ type }}"
- sub_host_type: "{{ g_sub_host_type }}"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_master_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_master_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "master" and sub_host_type == "default"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_etcd_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_etcd_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "etcd" and sub_host_type == "default"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_infra_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_infra_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "node" and sub_host_type == "infra"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_node_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_node_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "node" and sub_host_type == "compute"
-
-- set_fact:
- ec2_instance_type: "{{ deployment_vars[deployment_type].type }}"
- when: ec2_instance_type is not defined
-- set_fact:
- ec2_security_groups: "{{ deployment_vars[deployment_type].security_groups }}"
- when: ec2_security_groups is not defined
-
-- name: Find amis for deployment_type
- ec2_ami_find:
- region: "{{ deployment_vars[deployment_type].region }}"
- ami_id: "{{ deployment_vars[deployment_type].image }}"
- name: "{{ deployment_vars[deployment_type].image_name }}"
- register: ami_result
-
-- fail: msg="Could not find requested ami"
- when: not ami_result.results
-
-- set_fact:
- latest_ami: "{{ ami_result.results | oo_ami_selector(deployment_vars[deployment_type].image_name) }}"
- volume_defs:
- etcd:
- root:
- volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}"
- device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}"
- master:
- root:
- volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
- device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
- docker:
- volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(10, true) }}"
- device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
- node:
- root:
- volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}"
- device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
- docker:
- volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}"
- device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
-
-- set_fact:
- volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}"
-
-- name: Launch instance(s)
- ec2:
- state: present
- region: "{{ deployment_vars[deployment_type].region }}"
- keypair: "{{ deployment_vars[deployment_type].keypair }}"
- group: "{{ deployment_vars[deployment_type].security_groups }}"
- instance_type: "{{ ec2_instance_type }}"
- image: "{{ deployment_vars[deployment_type].image }}"
- count: "{{ instances | length }}"
- vpc_subnet_id: "{{ deployment_vars[deployment_type].vpc_subnet }}"
- assign_public_ip: "{{ deployment_vars[deployment_type].assign_public_ip }}"
- user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
- wait: yes
- instance_tags:
- created-by: "{{ created_by }}"
- clusterid: "{{ cluster }}"
- environment: "{{ cluster_env }}"
- host-type: "{{ host_type }}"
- sub-host-type: "{{ sub_host_type }}"
- volumes: "{{ volumes }}"
- register: ec2
-
-- name: Add Name tag to instances
- ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
- args:
- tags:
- Name: "{{ item.0 }}"
-
-- set_fact:
- instance_groups: >
- tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }},
- tag_environment_{{ cluster_env }}, tag_host-type_{{ host_type }},
- tag_sub-host-type_{{ sub_host_type }}
-
-- set_fact:
- node_label:
- region: "{{ deployment_vars[deployment_type].region }}"
- type: "{{sub_host_type}}"
- when: host_type == "node"
-
-- set_fact:
- node_label:
- region: "{{ deployment_vars[deployment_type].region }}"
- type: "{{host_type}}"
- when: host_type != "node"
-
-- set_fact:
- logrotate:
- - name: syslog
- path: |
- /var/log/cron
- /var/log/maillog
- /var/log/messages
- /var/log/secure
- /var/log/spooler"
- options:
- - daily
- - rotate 7
- - compress
- - sharedscripts
- - missingok
- scripts:
- postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
-
-- name: Add new instances groups and variables
- add_host:
- hostname: "{{ item.0 }}"
- ansible_ssh_host: "{{ item.1.dns_name }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: "{{ instance_groups }}"
- ec2_private_ip_address: "{{ item.1.private_ip }}"
- ec2_ip_address: "{{ item.1.public_ip }}"
- ec2_tag_sub-host-type: "{{ sub_host_type }}"
- openshift_node_labels: "{{ node_label }}"
- logrotate_scripts: "{{ logrotate }}"
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
-
-- name: Add new instances to nodes_to_add group if needed
- add_host:
- hostname: "{{ item.0 }}"
- ansible_ssh_host: "{{ item.1.dns_name }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: nodes_to_add
- ec2_private_ip_address: "{{ item.1.private_ip }}"
- ec2_ip_address: "{{ item.1.public_ip }}"
- openshift_node_labels: "{{ node_label }}"
- logrotate_scripts: "{{ logrotate }}"
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
- when: oo_extend_env is defined and oo_extend_env | bool
-
-- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
- with_items: "{{ ec2.instances }}"
-
-- name: Wait for user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 10
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
deleted file mode 100644
index b1087f9c4..000000000
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-#cloud-config
-{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
-mounts:
-- [ xvdb ]
-- [ ephemeral0 ]
-{% endif %}
-
-write_files:
-{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
-- content: |
- DEVS=/dev/xvdb
- VG=docker_vg
- path: /etc/sysconfig/docker-storage-setup
- owner: root:root
- permissions: '0644'
-{% endif %}
-{% if deployment_vars[deployment_type].become | bool %}
-- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
- permissions: 440
- content: |
- Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty
-{% endif %}
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
deleted file mode 100644
index 1f15aa4bf..000000000
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,77 +0,0 @@
----
-- name: Terminate instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) }}"
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Remove tags from instances
- ec2_tag:
- resource: "{{ hostvars[item]['ec2_id'] }}"
- region: "{{ hostvars[item]['ec2_region'] }}"
- state: absent
- tags:
- environment: "{{ hostvars[item]['ec2_tag_environment'] }}"
- clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
- host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
- sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
- with_items: "{{ groups.oo_hosts_to_terminate }}"
- when: "'oo_hosts_to_terminate' in groups"
-
- - name: Terminate instances
- ec2:
- state: absent
- instance_ids: ["{{ hostvars[item].ec2_id }}"]
- region: "{{ hostvars[item].ec2_region }}"
- ignore_errors: yes
- register: ec2_term
- with_items: "{{ groups.oo_hosts_to_terminate }}"
- when: "'oo_hosts_to_terminate' in groups"
-
- # Fail if any of the instances failed to terminate with an error other
- # than 403 Forbidden
- - fail:
- msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
- when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: "{{ ec2_term.results }}"
-
- - name: Stop instance if termination failed
- ec2:
- state: stopped
- instance_ids: ["{{ item.item.ec2_id }}"]
- region: "{{ item.item.ec2_region }}"
- register: ec2_stop
- when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: "{{ ec2_term.results }}"
-
- - name: Rename stopped instances
- ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
- args:
- tags:
- Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
- with_items: "{{ ec2_stop.results }}"
- when: ec2_stop | changed
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
deleted file mode 100644
index ed05d61ed..000000000
--- a/playbooks/aws/openshift-cluster/update.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Update - Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Update - Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
deleted file mode 100644
index b2b0716be..000000000
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,156 +0,0 @@
----
-debug_level: 2
-
-deployment_rhel7_ent_base:
- # rhel-7.1, requires cloud access subscription
- image: "{{ lookup('oo_option', 'ec2_image') | default('ami-10251c7a', True) }}"
- image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
- region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
- ssh_user: ec2-user
- become: yes
- keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
- type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
- security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
- vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
- assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
-
-deployment_vars:
- origin:
- # centos-7, requires marketplace
- image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}"
- image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
- region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
- ssh_user: centos
- become: yes
- keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
- type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
- security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
- vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
- assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
-
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
-
-clusterid: mycluster
-region: us-east-1
-
-provision:
- clusterid: "{{ clusterid }}"
- region: "{{ region }}"
-
- build: # build specific variables here
- ami_name: "openshift-gi-"
- base_image: ami-bdd5d6ab # base image for AMI to build from
- yum_repositories: # this is an example repository but it requires sslclient info
- - name: openshift-repo
- file: openshift-repo
- description: OpenShift Builds
- baseurl: https://mirror.openshift.com/enterprise/online-int/latest/x86_64/os/
- enabled: yes
- gpgcheck: no
- sslverify: no
- sslclientcert: "/var/lib/yum/client-cert.pem"
- sslclientkey: "/var/lib/yum/client-key.pem"
- gpgkey: "https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted"
-
- # when creating an encrypted AMI please specify use_encryption
- use_encryption: False
-
- openshift_ami_tags:
- bootstrap: "true"
- openshift-created: "true"
- clusterid: "{{ clusterid }}"
-
- # Use s3 backed registry storage
- openshift_registry_s3: True
-
- # if using custom certificates these are required for the ELB
- iam_cert_ca:
- name: "{{ clusterid }}_openshift"
- cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
- key_path: '/path/to/wildcard.<clusterid>.example.com.key'
- chain_path: '/path/to/cert.ca.crt'
-
- instance_users:
- - key_name: myuser_key
- username: myuser
- pub_key: |
- ssh-rsa AAAA== myuser@system
-
- node_group_config:
- tags:
- clusterid: "{{ clusterid }}"
- environment: stg
-
- ssh_key_name: myuser_key
-
- # master specific cluster node settings
- master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-
- # compute specific cluster node settings
- compute:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 100
- desired_size: 3
- tags:
- host-type: node
- sub-host-type: compute
-
- # infra specific cluster node settings
- infra:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 2
- max_size: 20
- desired_size: 2
- tags:
- host-type: node
- sub-host-type: infra
-
- # vpc settings
- vpc:
- cidr: 172.31.0.0/16
- subnets:
- us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
- - cidr: 172.31.48.0/20
- az: "us-east-1c"
- - cidr: 172.31.32.0/20
- az: "us-east-1e"
- - cidr: 172.31.16.0/20
- az: "us-east-1a"
diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/byo/openshift-checks/README.md
index f0f14b268..b26e7d7ed 100644
--- a/playbooks/byo/openshift-checks/README.md
+++ b/playbooks/byo/openshift-checks/README.md
@@ -7,15 +7,14 @@ Ansible's default operation mode is to fail fast, on the first error. However,
when performing checks, it is useful to gather as much information about
problems as possible in a single run.
-Thus, the playbooks run a battery of checks against the inventory hosts and have
-Ansible gather intermediate errors, giving a more complete diagnostic of the
-state of each host. If any check failed, the playbook run will be marked as
-failed.
+Thus, the playbooks run a battery of checks against the inventory hosts and
+gather intermediate errors, giving a more complete diagnostic of the state of
+each host. If any check fails, the playbook run will be marked as failed.
To facilitate understanding the problems that were encountered, a custom
callback plugin summarizes execution errors at the end of a playbook run.
-# Available playbooks
+## Available playbooks
1. Pre-install playbook ([pre-install.yml](pre-install.yml)) - verifies system
requirements and looks for common problems that can prevent a successful
@@ -27,6 +26,10 @@ callback plugin summarizes execution errors at the end of a playbook run.
3. Certificate expiry playbooks ([certificate_expiry](certificate_expiry)) -
check that certificates in use are valid and not expiring soon.
+4. Adhoc playbook ([adhoc.yml](adhoc.yml)) - use it to run adhoc checks or to
+ list existing checks.
+ See the [next section](#the-adhoc-playbook) for a usage example.
+
## Running
With a [recent installation of Ansible](../../../README.md#setup), run the playbook
@@ -59,6 +62,41 @@ against your inventory file. Here is the step-by-step:
$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v
```
+### The adhoc playbook
+
+The adhoc playbook provides the flexibility to run any single check or any
+custom group of checks. What runs is determined by the `openshift_checks`
+variable, which can be set on the command line using the `-e` flag, among
+other ways supported by Ansible.
+
+For example, to run the `docker_storage` check:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage
+```
+
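+Alternatively, the variable can be set in the inventory instead of on the
+command line (a sketch, assuming the `OSEv3` inventory group used by the byo
+playbooks):
+
+```ini
+[OSEv3:vars]
+openshift_checks=docker_storage
+```
+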
+To run more checks, use a comma-separated list of check names:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability
+```
+
+To run an entire class of checks, use the name of a check group tag prefixed by `@`. For example, the following will run all checks tagged `preflight`:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight
+```
+
+It is valid to specify multiple check tags and individual check names together
+in a comma-separated list.
+
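+For instance, a group tag and an individual check name can be combined
+(reusing the names from the examples above):
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight,docker_storage
+```
+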
+To list all of the available checks and tags, run the adhoc playbook without
+setting the `openshift_checks` variable:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml
+```
+
## Running in a container
This repository is built into a Docker image including Ansible so that it can
diff --git a/playbooks/byo/openshift-checks/adhoc.yml b/playbooks/byo/openshift-checks/adhoc.yml
new file mode 100644
index 000000000..226bed732
--- /dev/null
+++ b/playbooks/byo/openshift-checks/adhoc.yml
@@ -0,0 +1,27 @@
+---
+# NOTE: ideally this would be part of a single play in
+# common/openshift-checks/adhoc.yml that lists the existing checks when
+# openshift_checks is not set, or runs the requested checks otherwise.
+# However, actually running the checks requires the included dependencies to
+# run first, and that takes time. To speed up listing checks, this separate
+# play runs before the dependencies are included, improving the UX.
+- name: OpenShift health checks
+  # NOTE: though the openshift_checks variable could potentially be defined on
+  # individual hosts while not defined for localhost, we do not support that
+  # usage. Running this play only on localhost speeds up execution.
+ hosts: localhost
+ connection: local
+ roles:
+ - openshift_health_checker
+ vars:
+    r_openshift_health_checker_playbook_context: adhoc
+ pre_tasks:
+ - name: List known health checks
+ action: openshift_health_check
+ when: openshift_checks is undefined or not openshift_checks
+
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-checks/adhoc.yml
diff --git a/playbooks/byo/openshift-checks/health.yml b/playbooks/byo/openshift-checks/health.yml
index dfc1a7db0..96a71e4dc 100644
--- a/playbooks/byo/openshift-checks/health.yml
+++ b/playbooks/byo/openshift-checks/health.yml
@@ -1,3 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-checks/health.yml
diff --git a/playbooks/byo/openshift-checks/pre-install.yml b/playbooks/byo/openshift-checks/pre-install.yml
index 5e8c3ab9b..dd93df0bb 100644
--- a/playbooks/byo/openshift-checks/pre-install.yml
+++ b/playbooks/byo/openshift-checks/pre-install.yml
@@ -1,3 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-checks/pre-install.yml
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index acf5469bf..60fa44c5b 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -9,6 +9,4 @@
- include: ../../common/openshift-cluster/config.yml
vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
index bbec3a4c2..a523bb47f 100644
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ b/playbooks/byo/openshift-cluster/openshift-logging.yml
@@ -13,6 +13,3 @@
- always
- include: ../../common/openshift-cluster/openshift_logging.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-cluster/openshift-prometheus.yml b/playbooks/byo/openshift-cluster/openshift-prometheus.yml
new file mode 100644
index 000000000..15917078d
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-prometheus.yml
@@ -0,0 +1,4 @@
+---
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/openshift_prometheus.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-provisioners.yml b/playbooks/byo/openshift-cluster/openshift-provisioners.yml
new file mode 100644
index 000000000..8e80f158b
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-provisioners.yml
@@ -0,0 +1,6 @@
+---
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-cluster/openshift_provisioners.yml
diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml
index 6f95b4e2d..40a7606e7 100644
--- a/playbooks/byo/openshift-cluster/service-catalog.yml
+++ b/playbooks/byo/openshift-cluster/service-catalog.yml
@@ -13,6 +13,3 @@
- always
- include: ../../common/openshift-cluster/service_catalog.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml
index dd3f47a4d..1342bd60c 100644
--- a/playbooks/byo/openshift-etcd/config.yml
+++ b/playbooks/byo/openshift-etcd/config.yml
@@ -1,14 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
- include: ../../common/openshift-etcd/config.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml
index 143016159..2dec2bef6 100644
--- a/playbooks/byo/openshift-etcd/migrate.yml
+++ b/playbooks/byo/openshift-etcd/migrate.yml
@@ -1,8 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
+
+- include: ../../common/openshift-cluster/std_include.yml
- include: ../../common/openshift-etcd/migrate.yml
- tags:
- - always
diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml
index d43533641..034bba4b4 100644
--- a/playbooks/byo/openshift-etcd/restart.yml
+++ b/playbooks/byo/openshift-etcd/restart.yml
@@ -1,10 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
- include: ../../common/openshift-etcd/restart.yml
diff --git a/playbooks/byo/openshift-etcd/scaleup.yml b/playbooks/byo/openshift-etcd/scaleup.yml
index c35fd9f37..a2a5856a9 100644
--- a/playbooks/byo/openshift-etcd/scaleup.yml
+++ b/playbooks/byo/openshift-etcd/scaleup.yml
@@ -1,8 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
-- include: ../../common/openshift-cluster/evaluate_groups.yml
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-etcd/scaleup.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-master/additional_config.yml b/playbooks/byo/openshift-master/additional_config.yml
new file mode 100644
index 000000000..b3d7b5731
--- /dev/null
+++ b/playbooks/byo/openshift-master/additional_config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-master/additional_config.yml
diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml
new file mode 100644
index 000000000..98be0c448
--- /dev/null
+++ b/playbooks/byo/openshift-master/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-master/config.yml
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
index 7988863f3..8950efd00 100644
--- a/playbooks/byo/openshift-master/restart.yml
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -1,10 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
- include: ../../common/openshift-master/restart.yml
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index 64811e80d..2179d1416 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -15,7 +15,6 @@
when:
- (g_new_master_hosts | default([]) | length == 0) or (g_new_node_hosts | default([]) | length == 0)
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-master/scaleup.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml
new file mode 100644
index 000000000..839dc36ff
--- /dev/null
+++ b/playbooks/byo/openshift-node/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-node/config.yml
diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml
index 92665d71d..ccf9e82da 100644
--- a/playbooks/byo/openshift-node/restart.yml
+++ b/playbooks/byo/openshift-node/restart.yml
@@ -1,10 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
- include: ../../common/openshift-node/restart.yml
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index fda89b1ea..e0c36fb69 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -14,9 +14,6 @@
when:
- g_new_node_hosts | default([]) | length == 0
-- include: ../../common/openshift-node/scaleup.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_master_etcd_hosts: "{{ groups.etcd | default([]) }}"
- openshift_master_etcd_port: 2379
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-node/config.yml
diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml
deleted file mode 100644
index 76246e7b0..000000000
--- a/playbooks/byo/vagrant.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: rhel_subscribe.yml
-
-- include: config.yml
diff --git a/playbooks/common/README.md b/playbooks/common/README.md
index 0b5e26989..968bd99cb 100644
--- a/playbooks/common/README.md
+++ b/playbooks/common/README.md
@@ -1,9 +1,8 @@
# Common playbooks
This directory has a generic set of playbooks that are included by playbooks in
-[`byo`](../byo), as well as other playbooks related to the
-[`bin/cluster`](../../bin) tool.
+[`byo`](../byo).
Note: playbooks in this directory use generic group names that do not line up
-with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks,
-requiring an explicit remapping of groups.
+with the groups used by the `byo` playbooks, requiring an explicit remapping of
+groups.
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml
new file mode 100644
index 000000000..dfcef8435
--- /dev/null
+++ b/playbooks/common/openshift-checks/adhoc.yml
@@ -0,0 +1,12 @@
+---
+- name: OpenShift health checks
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+    r_openshift_health_checker_playbook_context: adhoc
+ post_tasks:
+ - name: Run health checks
+ action: openshift_health_check
+ args:
+ checks: '{{ openshift_checks | default([]) }}'
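+      # `checks` accepts individual check names and @tag group references,
+      # e.g. -e openshift_checks=@preflight,docker_storage on the CLI.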
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index ff5b5af67..21ea785ef 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,10 +1,6 @@
---
-- include: ../openshift-cluster/std_include.yml
- tags:
- - always
-
- name: Run OpenShift health checks
- hosts: OSEv3
+ hosts: oo_all_hosts
roles:
- openshift_health_checker
vars:
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index 861229f21..88e6f9120 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,10 +1,6 @@
---
-- include: ../openshift-cluster/std_include.yml
- tags:
- - always
-
-- hosts: OSEv3
- name: run OpenShift pre-install checks
+- name: Run OpenShift pre-install checks
+ hosts: oo_all_hosts
roles:
- openshift_health_checker
vars:
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index e1df71112..3baa3c54d 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -18,33 +18,19 @@
- docker_image_availability
- docker_storage
-- include: initialize_firewall.yml
- tags:
- - always
-
-- hosts: localhost
- tasks:
- - fail:
- msg: No etcd hosts defined. Running an all-in-one master is deprecated and will no longer be supported in a future upgrade.
- when: groups.oo_etcd_to_config | default([]) | length == 0 and not openshift_master_unsupported_all_in_one | default(False)
-
- include: initialize_oo_option_facts.yml
tags:
- always
-- name: Disable excluders
+- name: Set hostname
hosts: oo_masters_to_config:oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ tasks:
+ - name: Set hostname
+ hostname:
+ name: "{{ openshift.common.hostname }}"
+ when: openshift_set_hostname | default(True) | bool
- include: ../openshift-etcd/config.yml
- tags:
- - etcd
- include: ../openshift-nfs/config.yml
tags:
@@ -55,12 +41,8 @@
- loadbalancer
- include: ../openshift-master/config.yml
- tags:
- - master
-- include: additional_config.yml
- tags:
- - master
+- include: ../openshift-master/additional_config.yml
- include: ../openshift-node/config.yml
tags:
@@ -79,13 +61,3 @@
- openshift_enable_service_catalog | default(false) | bool
tags:
- servicecatalog
-
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config:oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 50351588f..be14b06f0 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -27,9 +27,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: master
local_facts:
dns_port: '8053'
@@ -50,9 +47,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: node
local_facts:
dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c56b07037..16a733899 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -13,12 +13,12 @@
- name: Evaluate groups - g_master_hosts or g_new_master_hosts required
fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: g_master_hosts is not defined or g_new_master_hosts is not defined
+ when: g_master_hosts is not defined and g_new_master_hosts is not defined
- name: Evaluate groups - g_node_hosts or g_new_node_hosts required
fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: g_node_hosts is not defined or g_new_node_hosts is not defined
+ when: g_node_hosts is not defined and g_new_node_hosts is not defined
- name: Evaluate groups - g_lb_hosts required
fail:
@@ -33,13 +33,23 @@
- name: Evaluate groups - g_nfs_hosts is single host
fail:
msg: The nfs group must be limited to one host
- when: (groups[g_nfs_hosts] | default([])) | length > 1
+ when: g_nfs_hosts | default([]) | length > 1
- name: Evaluate groups - g_glusterfs_hosts required
fail:
msg: This playbook requires g_glusterfs_hosts to be set
when: g_glusterfs_hosts is not defined
+ - name: Evaluate groups - Fail if no etcd hosts group is defined
+ fail:
+ msg: >
+ No etcd hosts defined. Running an all-in-one master is deprecated and
+ will no longer be supported in a future upgrade.
+ when:
+ - g_etcd_hosts | default([]) | length == 0
+ - not openshift_master_unsupported_all_in_one | default(False)
+ - not openshift_node_bootstrap | default(False)
+
- name: Evaluate oo_all_hosts
add_host:
name: "{{ item }}"
@@ -117,7 +127,7 @@
add_host:
name: "{{ item }}"
groups: oo_etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
changed_when: False
- name: Evaluate oo_nodes_to_config
@@ -173,5 +183,5 @@
groups: oo_etcd_to_migrate
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
changed_when: no
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 4bf5d33b1..0723575c2 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -108,14 +108,27 @@
when:
- l_any_system_container | bool
+  - name: Default system_images_registry to an enterprise registry
+ set_fact:
+ system_images_registry: "registry.access.redhat.com"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "openshift-enterprise"
+
+  - name: Default system_images_registry to the community registry
+ set_fact:
+ system_images_registry: "docker.io"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "origin"
+
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
role: common
local_facts:
- debug_level: "{{ openshift_debug_level | default(2) }}"
deployment_type: "{{ openshift_deployment_type }}"
deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
- cluster_id: "{{ openshift_cluster_id | default('default') }}"
+ cli_image: "{{ osm_image | default(None) }}"
hostname: "{{ openshift_hostname | default(None) }}"
ip: "{{ openshift_ip | default(None) }}"
is_containerized: "{{ l_is_containerized | default(None) }}"
@@ -124,7 +137,7 @@
is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
etcd_runtime: "{{ l_etcd_runtime }}"
- system_images_registry: "{{ system_images_registry | default('') }}"
+ system_images_registry: "{{ system_images_registry }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
@@ -133,8 +146,6 @@
no_proxy: "{{ openshift_no_proxy | default(None) }}"
generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
- sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
- use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
- name: initialize_facts set_fact repoquery command
set_fact:
@@ -142,4 +153,4 @@
- name: initialize_facts set_fact on openshift_docker_hosted_registry_network
set_fact:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
diff --git a/playbooks/common/openshift-cluster/initialize_firewall.yml b/playbooks/common/openshift-cluster/initialize_firewall.yml
deleted file mode 100644
index f0374fbc7..000000000
--- a/playbooks/common/openshift-cluster/initialize_firewall.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Initialize host firewall
- hosts: oo_all_hosts
- tasks:
- - name: Install and configure the proper firewall settings
- include_role:
- name: os_firewall
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 99a634970..75339f6df 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -29,7 +29,6 @@
- role: openshift_default_storage_class
when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- role: openshift_hosted
- r_openshift_hosted_use_calico: "{{ openshift.common.use_calico | default(false) | bool }}"
- role: openshift_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
- role: openshift_logging
@@ -49,6 +48,9 @@
- role: cockpit-ui
when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+ - role: openshift_prometheus
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
- name: Update master-config for publicLoggingURL
hosts: oo_masters_to_config:!oo_first_master
tags:
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
new file mode 100644
index 000000000..a979c0c00
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -0,0 +1,9 @@
+---
+- include: std_include.yml
+
+- name: OpenShift Prometheus
+ hosts: oo_first_master
+ roles:
+ - openshift_prometheus
+ vars:
+ openshift_prometheus_state: present
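+
+# The byo entry point for this playbook is
+# playbooks/byo/openshift-cluster/openshift-prometheus.yml, which includes
+# initialize_groups.yml before including this file.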
diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml
new file mode 100644
index 000000000..7e28a11e8
--- /dev/null
+++ b/playbooks/common/openshift-cluster/sanity_checks.yml
@@ -0,0 +1,47 @@
+---
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ tasks:
+ - fail:
+ msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+ msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Nuage sdn can not be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with nuage
+ when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
+
+ - fail:
+      msg: The Calico playbook does not yet integrate with the Flannel playbook in OpenShift. Set either openshift_use_calico or openshift_use_flannel, but not both.
+ when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+      msg: Calico can not be used with Nuage in OpenShift. Set either openshift_use_calico or openshift_use_nuage, but not both.
+ when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+      msg: Calico can not be used with Contiv in OpenShift. Set either openshift_use_calico or openshift_use_contiv, but not both.
+ when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: openshift_hostname must be 63 characters or less
+ when: openshift_hostname is defined and openshift_hostname | length > 63
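+
+# NOTE: the checks above encode pairwise mutual exclusion between the SDN
+# plugin flags: at most one of openshift_use_flannel, openshift_use_nuage,
+# openshift_use_contiv and openshift_use_calico may be enabled, and enabling
+# any of them requires openshift_use_openshift_sdn=false.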
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 6cc56889a..cef0072f3 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -7,6 +7,10 @@
tags:
- always
+- include: sanity_checks.yml
+ tags:
+ - always
+
- include: validate_hostnames.yml
tags:
- node
diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..eb118365a 100644
--- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate etcd instance names(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: etcd_names_output
with_sequence: count={{ num_etcd }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..783f70f50 100644
--- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate master instance names(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: master_names_output
with_sequence: count={{ num_masters }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..c103e40a9 100644
--- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
@@ -5,7 +5,7 @@
- name: Generate node instance names(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
register: node_names_output
with_sequence: count={{ number_nodes }}
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
deleted file mode 100644
index be956fca5..000000000
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: evaluate_groups.yml
-
-- name: Subscribe hosts, update repos and update OS packages
- hosts: oo_hosts_to_update
- roles:
- # Explicitly calling openshift_facts because it appears that when
- # rhel_subscribe is skipped that the openshift_facts dependency for
- # openshift_repos is also skipped (this is the case at least for Ansible
- # 2.0.2)
- - openshift_facts
- - role: rhel_subscribe
- when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
- - openshift_repos
- - os_update_latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 02b8a9d3c..98953f72e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -4,7 +4,6 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- include: ../initialize_nodes_to_upgrade.yml
@@ -60,7 +59,7 @@
retries: 60
delay: 60
- - include: upgrade.yml
+ - include: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 83f16ac0d..83f16ac0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 808cc562c..808cc562c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 0f421928b..c98065cf4 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -4,7 +4,6 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- include: ../initialize_oo_option_facts.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 9d8b73cff..9d8b73cff 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
deleted file mode 100644
index 354af3cde..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Verify node processes
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- - openshift_docker_facts
- tasks:
- - name: Ensure Node is running
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index abcd21c90..18f10437d 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -91,10 +91,7 @@
- include_vars: ../../../../roles/openshift_master/vars/main.yml
- - name: Remove any legacy systemd units
- include: ../../../../roles/openshift_master/tasks/clean_systemd_units.yml
-
- - name: Update systemd units
+ - name: Remove any legacy systemd units and update systemd units
include: ../../../../roles/openshift_master/tasks/systemd_units.yml
- name: Check for ca-bundle.crt
@@ -284,7 +281,7 @@
roles:
- openshift_facts
tasks:
- - include: docker/upgrade.yml
+ - include: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
index 0f6fb46a4..a241ef039 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -89,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index cfba788a8..54c85f0fb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 1054f430e..cee4e9087 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
index 783289c87..ae217ba2e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -89,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 8aa443c3c..d7cb38d03 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index 436795694..8531e6045 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index 9a000265e..a3d0d6305 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -89,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 2dd9676c7..5fee56615 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index d5fe8285e..e29d0f8e6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 8ceab09f4..51acd17da 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index f765e9064..9fe059ac9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 8bed6a8c2..1b10d4e37 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 4f05d0c64..9ec40723a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 2ef95e778..f97f34c3b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index abc4c245b..e95b90cd5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 90e95422b..f76fc68d1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -7,6 +7,17 @@
hosts: oo_first_master
roles:
- { role: lib_openshift }
+
tasks:
- name: Check for invalid namespaces and SDN errors
oc_objectvalidator:
+
+ - name: Confirm OpenShift authorization objects are in sync
+ command: >
+ {{ openshift.common.client_binary }} adm migrate authorization
+ when: not openshift.common.version_gte_3_7 | bool
+ changed_when: false
+ register: l_oc_result
+ until: l_oc_result.rc == 0
+ retries: 4
+ delay: 15
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 2cb6197d1..f2b85eea1 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -3,6 +3,7 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
+ - role: os_firewall
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index 311ff84b6..a2af7bb21 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -1,22 +1,12 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
- tags:
- - always
-
- name: Run pre-checks
hosts: oo_etcd_to_migrate
- tags:
- - always
roles:
- role: etcd_migrate
r_etcd_migrate_action: check
r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ ansible_default_ipv4.address }}"
-- include: ../openshift-cluster/initialize_facts.yml
- tags:
- - always
-
# TODO: This will be different for release-3.6 branch
- name: Prepare masters for etcd data migration
hosts: oo_masters_to_config
@@ -36,8 +26,6 @@
- name: Backup v2 data
hosts: oo_etcd_to_migrate
gather_facts: no
- tags:
- - always
roles:
- role: openshift_facts
- role: etcd_common
@@ -66,8 +54,6 @@
- name: Stop etcd
hosts: oo_etcd_to_migrate
gather_facts: no
- tags:
- - always
pre_tasks:
- set_fact:
l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
@@ -79,8 +65,6 @@
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
gather_facts: no
- tags:
- - always
roles:
- role: etcd_migrate
r_etcd_migrate_action: migrate
@@ -92,8 +76,6 @@
- name: Clean data stores on remaining etcd hosts
hosts: oo_etcd_to_migrate[1:]
gather_facts: no
- tags:
- - always
roles:
- role: etcd_migrate
r_etcd_migrate_action: clean_data
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index 52b90daca..5f8bb1c7a 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -28,13 +28,15 @@
delay: 10
until: etcd_add_check.rc == 0
roles:
+ - role: os_firewall
+ when: etcd_add_check.rc == 0
- role: openshift_etcd
when: etcd_add_check.rc == 0
etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_initial_cluster_state: "existing"
- initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') }}"
+ initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
etcd_ca_setup: False
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
@@ -47,5 +49,7 @@
--ca-file {{ etcd_peer_ca_file }}
-C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
cluster-health
- retries: 1
+ register: scaleup_health
+ retries: 3
delay: 30
+ until: scaleup_health.rc == 0
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
deleted file mode 100644
index ced4bddc5..000000000
--- a/playbooks/common/openshift-etcd/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_etcd
- add_host:
- name: "{{ item }}"
- groups: g_service_etcd
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change etcd state on etcd instance(s)
- hosts: g_service_etcd
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=etcd state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index 2dacc1218..09ed81a83 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -14,4 +14,5 @@
+ openshift_loadbalancer_additional_backends | default([]) }}"
openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
+ - role: os_firewall
- role: openshift_loadbalancer
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
deleted file mode 100644
index d3762c961..000000000
--- a/playbooks/common/openshift-loadbalancer/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_lb
- add_host:
- name: "{{ item }}"
- groups: g_service_lb
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on lb instance(s)
- hosts: g_service_lb
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index c0ea93d2c..7468c78f0 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -11,13 +11,13 @@
when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- role: openshift_examples
registry_url: "{{ openshift.master.registry_url }}"
- when: openshift.common.install_examples | bool
+ when: openshift_install_examples | default(True)
- role: openshift_hosted_templates
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_manageiq
- when: openshift.common.use_manageiq | bool
+ when: openshift_use_manageiq | default(false) | bool
- role: cockpit
when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
(osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
- role: flannel_register
- when: openshift.common.use_flannel | bool
+ when: openshift_use_flannel | default(false) | bool
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index b30450def..c77d7bb87 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,4 +1,12 @@
---
+- name: Disable excluders
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
vars:
@@ -180,6 +188,7 @@
| oo_collect('openshift.common.ip') | default([]) | join(',')
}}"
roles:
+ - role: os_firewall
- role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
@@ -199,11 +208,19 @@
openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
- role: nuage_master
- when: openshift.common.use_nuage | bool
+ when: openshift_use_nuage | default(false) | bool
- role: calico_master
- when: openshift.common.use_calico | bool
+ when: openshift_use_calico | default(false) | bool
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
+
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index bc61ee9bb..17f9ef4bc 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -1,11 +1,4 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
- name: Update master count
hosts: oo_masters:!oo_masters_to_config
serial: 1
@@ -50,38 +43,8 @@
delay: 1
changed_when: false
-- name: Configure docker hosts
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
-
-- name: Disable excluders
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
- include: ../openshift-master/config.yml
- include: ../openshift-loadbalancer/config.yml
- include: ../openshift-node/config.yml
-
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
deleted file mode 100644
index 48a2731aa..000000000
--- a/playbooks/common/openshift-master/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- groups: g_service_masters
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on master instance(s)
- hosts: g_service_masters
- connection: ssh
- gather_facts: no
- tasks:
- - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
deleted file mode 100644
index b1e35e4b1..000000000
--- a/playbooks/common/openshift-nfs/service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Populate g_service_nfs host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nfs
- add_host:
- name: "{{ item }}"
- groups: g_service_nfs
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on nfs instance(s)
- hosts: g_service_nfs
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=nfs-server state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index ef7d54f9f..c7afc78ac 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,4 +1,12 @@
---
+- name: Disable excluders
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
- name: Evaluate node groups
hosts: localhost
become: no
@@ -32,6 +40,7 @@
}}"
roles:
+ - role: os_firewall
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -47,6 +56,7 @@
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
+ - role: os_firewall
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -61,14 +71,14 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
- when: openshift.common.use_flannel | bool
+ when: openshift_use_flannel | default(false) | bool
- role: calico
- when: openshift.common.use_calico | bool
+ when: openshift_use_calico | default(false) | bool
- role: nuage_node
- when: openshift.common.use_nuage | bool
+ when: openshift_use_nuage | default(false) | bool
- role: contiv
contiv_role: netplugin
- when: openshift.common.use_contiv | bool
+ when: openshift_use_contiv | default(false) | bool
- role: nickhammond.logrotate
- role: openshift_manage_node
openshift_master_host: "{{ groups.oo_first_master.0 }}"
@@ -76,3 +86,11 @@
- name: Create group for deployment type
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
+
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
deleted file mode 100644
index 40da8990d..000000000
--- a/playbooks/common/openshift-node/scaleup.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
-- name: Gather and set facts for first master
- hosts: oo_first_master
- vars:
- openshift_master_count: "{{ groups.oo_masters | length }}"
- pre_tasks:
- - set_fact:
- openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
- when: openshift_master_default_subdomain is not defined
- roles:
- - openshift_master_facts
-
-- name: Configure docker hosts
- hosts: oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
-
-- name: Disable excluders
- hosts: oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
-- include: ../openshift-node/config.yml
-
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
deleted file mode 100644
index 130a5416f..000000000
--- a/playbooks/common/openshift-node/service.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on node instance(s)
- hosts: g_service_nodes
- connection: ssh
- gather_facts: no
- tasks:
- - name: Change state on node instance(s)
- service:
- name: "{{ service_type }}-node"
- state: "{{ new_cluster_state }}"
diff --git a/playbooks/gce/README.md b/playbooks/gce/README.md
deleted file mode 100644
index 0514d6f50..000000000
--- a/playbooks/gce/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# GCE playbooks
-
-This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
-which is community supported and most use is considered deprecated.
diff --git a/playbooks/gce/openshift-cluster/add_nodes.yml b/playbooks/gce/openshift-cluster/add_nodes.yml
deleted file mode 100644
index 765e03fdc..000000000
--- a/playbooks/gce/openshift-cluster/add_nodes.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- vars:
- oo_extend_env: True
- tasks:
- - fail:
- msg: Deployment type not supported for gce provider yet
- when: deployment_type == 'enterprise'
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
-- include: scaleup.yml
-- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index e5f41382b..000000000
--- a/playbooks/gce/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
- | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
deleted file mode 100644
index 2625d4d05..000000000
--- a/playbooks/gce/openshift-cluster/config.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hostname: "{{ gce_private_ip }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/gce/openshift-cluster/filter_plugins b/playbooks/gce/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/gce/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
deleted file mode 100644
index 7532a678b..000000000
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - fail: msg="Deployment type not supported for gce provider yet"
- when: deployment_type == 'enterprise'
-
- - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ etcd_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
- gce_machine_type: "{{ lookup('env', 'gce_machine_etcd_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_etcd_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
-
- - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
- gce_machine_type: "{{ lookup('env', 'gce_machine_master_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_master_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - add_host:
- name: "{{ master_names.0 }}"
- groups: service_master
- when: master_names is defined and master_names.0 is defined
-
-- include: update.yml
-
-- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
deleted file mode 100644
index 34ab09533..000000000
--- a/playbooks/gce/openshift-cluster/list.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}"
- oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}"
- with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/gce/openshift-cluster/lookup_plugins b/playbooks/gce/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/gce/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/roles b/playbooks/gce/openshift-cluster/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/gce/openshift-cluster/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles
\ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml
deleted file mode 100644
index 13b267976..000000000
--- a/playbooks/gce/openshift-cluster/service.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: Call same systemctl command for openshift on all instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- tasks:
- - fail: msg="cluster_id is required to be injected in this playbook"
- when: cluster_id is not defined
-
- - add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
-
- - add_host:
- name: "{{ item }}"
- groups: g_service_masters
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
-
-- include: ../../common/openshift-node/service.yml
-- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
deleted file mode 100644
index 65dd2b71e..000000000
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-- name: Launch instance(s)
- gce:
- instance_names: "{{ instances|join(',') }}"
- machine_type: "{{ gce_machine_type | default(deployment_vars[deployment_type].machine_type, true) }}"
- image: "{{ gce_machine_image | default(deployment_vars[deployment_type].image, true) }}"
- service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- project_id: "{{ lookup('env', 'gce_project_id') }}"
- zone: "{{ lookup('env', 'zone') }}"
- network: "{{ lookup('env', 'network') }}"
- subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}"
- # unsupported in 1.9.+
- #service_account_permissions: "datastore,logging-write"
- tags:
- - created-by-{{ lookup('env', 'LOGNAME') | regex_replace('[^a-z0-9]+', '') | default(cluster, true) }}
- - environment-{{ cluster_env }}
- - clusterid-{{ cluster_id }}
- - host-type-{{ type }}
- - sub-host-type-{{ g_sub_host_type }}
- metadata:
- startup-script: |
- #!/bin/bash
- echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}
-
- when: instances |length > 0
- register: gce
-
-- set_fact:
- node_label:
- # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
- region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
- type: "{{ g_sub_host_type }}"
- when: instances |length > 0 and type == "node"
-
-- set_fact:
- node_label:
- # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
- region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
- type: "{{ type }}"
- when: instances |length > 0 and type != "node"
-
-- name: Add new instances to groups and set variables needed
- add_host:
- hostname: "{{ item.name }}"
- ansible_ssh_host: "{{ item.public_ip }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
- gce_public_ip: "{{ item.public_ip }}"
- gce_private_ip: "{{ item.private_ip }}"
- openshift_node_labels: "{{ node_label }}"
- with_items: "{{ gce.instance_data | default([], true) }}"
-
-- name: Wait for ssh
- wait_for: port=22 host={{ item.public_ip }}
- with_items: "{{ gce.instance_data | default([], true) }}"
-
-- name: Wait for user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
- register: result
- until: result.rc == 0
- retries: 30
- delay: 5
- with_items: "{{ gce.instance_data | default([], true) }}"
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
deleted file mode 100644
index afe269b7c..000000000
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-- name: Terminate instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) }}"
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- vars_files:
- - vars.yml
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- name: Terminate instances(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - name: Terminate instances that were previously launched
- local_action:
- module: gce
- state: 'absent'
- name: "{{ item }}"
- service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- project_id: "{{ lookup('env', 'gce_project_id') }}"
- zone: "{{ lookup('env', 'zone') }}"
- with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"
- when: item is defined
-
-#- include: ../openshift-node/terminate.yml
-# vars:
-# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
-#
-#- include: ../openshift-master/terminate.yml
-# vars:
-# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
deleted file mode 100644
index 6d2af3d26..000000000
--- a/playbooks/gce/openshift-cluster/update.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
deleted file mode 100644
index 13c754c1e..000000000
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-debug_level: 2
-
-deployment_rhel7_ent_base:
- image: "{{ lookup('oo_option', 'image_name') | default('rhel-7', True) }}"
- machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
- ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- become: yes
-
-deployment_vars:
- origin:
- image: "{{ lookup('oo_option', 'image_name') | default('centos-7', True) }}"
- machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
- ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- become: yes
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/libvirt/README.md b/playbooks/libvirt/README.md
deleted file mode 100644
index 3ce46a76f..000000000
--- a/playbooks/libvirt/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# libvirt playbooks
-
-This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
-which is community supported and most use is considered deprecated.
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index e5f41382b..000000000
--- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
- | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
deleted file mode 100644
index 569e00da2..000000000
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-# TODO: need to figure out a plan for setting hostname, currently the default
-# is localhost, so no hostname value (or public_hostname) value is getting
-# assigned
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/libvirt/openshift-cluster/filter_plugins b/playbooks/libvirt/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/libvirt/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
deleted file mode 100644
index 2475b9d6b..000000000
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- vars:
- image_url: "{{ deployment_vars[deployment_type].image.url }}"
- image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
- image_name: "{{ deployment_vars[deployment_type].image.name }}"
- image_compression: "{{ deployment_vars[deployment_type].image.compression }}"
- tasks:
- - include: tasks/configure_libvirt.yml
-
- - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ etcd_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
-- include: update.yml
-
-- include: list.yml
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
deleted file mode 100644
index 579cd7ac6..000000000
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: ""
- oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}"
- with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/libvirt/openshift-cluster/lookup_plugins b/playbooks/libvirt/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/libvirt/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/roles b/playbooks/libvirt/openshift-cluster/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/libvirt/openshift-cluster/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles
\ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml
deleted file mode 100644
index 8bd24a8cf..000000000
--- a/playbooks/libvirt/openshift-cluster/service.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-# TODO: need to figure out a plan for setting hostname, currently the default
-# is localhost, so no hostname value (or public_hostname) value is getting
-# assigned
-
-- name: Call same systemctl command for openshift on all instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - fail: msg="cluster_id is required to be injected in this playbook"
- when: cluster_id is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: g_service_masters
- with_items: "{{ g_master_hosts | default([]) }}"
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: g_service_nodes
- with_items: "{{ g_node_hosts | default([]) }}"
-
-- include: ../../common/openshift-node/service.yml
-- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
deleted file mode 100644
index f237c1a60..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: configure_libvirt_storage_pool.yml
- when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined
-
-- include: configure_libvirt_network.yml
- when: libvirt_network is defined
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
deleted file mode 100644
index b42ca83af..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Create the libvirt network for OpenShift
- virt_net:
- name: '{{ libvirt_network }}'
- state: '{{ item }}'
- autostart: 'yes'
- xml: "{{ lookup('template', 'network.xml') }}"
- uri: '{{ libvirt_uri }}'
- with_items:
- - present
- - active
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
deleted file mode 100644
index 8685624ec..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Create libvirt storage directory for openshift
- file:
- dest: "{{ libvirt_storage_pool_path }}"
- state: directory
-
-# We need to set permissions on the directory and any items created under the directory, so we need to call the acl module with and without default set.
-- acl:
- default: '{{ item.default }}'
- entity: kvm
- etype: group
- name: "{{ libvirt_storage_pool_path }}"
- permissions: '{{ item.permissions }}'
- state: present
- with_items:
- - default: no
- permissions: x
- - default: yes
- permissions: rwx
-
-- name: Create the libvirt storage pool for OpenShift
- virt_pool:
- name: '{{ libvirt_storage_pool }}'
- state: '{{ item }}'
- autostart: 'yes'
- xml: "{{ lookup('template', 'storage-pool.xml') }}"
- uri: '{{ libvirt_uri }}'
- with_items:
- - present
- - active
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
deleted file mode 100644
index 4df86effa..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ /dev/null
@@ -1,142 +0,0 @@
----
-# TODO: Add support for choosing base image based on deployment_type and os
-# wanted (os wanted needs support added in bin/cluster with sane defaults:
-# fedora/centos for origin, rhel for enterprise)
-
-# TODO: create a role to encapsulate some of this complexity, possibly also
-# create a module to manage the storage tasks, network tasks, and possibly
-# even handle the libvirt tasks to set metadata in the domain xml and be able
-# to create/query data about vms without having to use xml the python libvirt
-# bindings look like a good candidate for this
-
-- name: Download Base Cloud image
- get_url:
- url: '{{ image_url }}'
- sha256sum: '{{ image_sha256 }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}'
- when: ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"]
- register: downloaded_image
-
-- name: Uncompress xz compressed base cloud image
- command: 'unxz -kf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
- args:
- creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
- when: image_compression in ["xz"] and downloaded_image.changed
-
-- name: Uncompress tgz compressed base cloud image
- command: 'tar zxvf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
- args:
- creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
- when: image_compression in ["tgz"] and downloaded_image.changed
-
-- name: Uncompress gzip compressed base cloud image
- command: 'gunzip {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
- args:
- creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
- when: image_compression in ["gz"] and downloaded_image.changed
-
-- name: Create the cloud-init config drive path
- file:
- dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
- state: directory
- with_items: '{{ instances }}'
-
-- name: Create the cloud-init config drive files
- template:
- src: '{{ item[1] }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
- with_nested:
- - '{{ instances }}'
- - [ user-data, meta-data ]
-
-- name: Check for genisoimage
- command: which genisoimage
- register: which_genisoimage
-
-- name: Create the cloud-init config drive
- command: "{{ 'genisoimage' if which_genisoimage.rc == 0 else 'mkisofs' }} -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data"
- args:
- chdir: "{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/"
- creates: "{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso"
- with_items: '{{ instances }}'
-
-- name: Refresh the libvirt storage pool for openshift
- command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
-
-- name: Create VM drives
- command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
- with_items: '{{ instances }}'
-
-- name: Create VM docker drives
- command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0'
- with_items: '{{ instances }}'
-
-- name: Create VMs
- virt:
- name: '{{ item }}'
- command: define
- xml: "{{ lookup('template', '../templates/domain.xml') }}"
- uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
-
-- name: Start VMs
- virt:
- name: '{{ item }}'
- state: running
- uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
-
-- name: Wait for the VMs to get an IP
- shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}'''
- register: nb_allocated_ips
- until: nb_allocated_ips.stdout == '{{ instances | length }}'
- retries: 60
- delay: 3
- when: instances | length != 0
-
-- name: Collect IP addresses of the VMs
- shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
- register: scratch_ip
- with_items: '{{ instances }}'
-
-- set_fact:
- ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}"
-
-- set_fact:
- node_label:
- type: "{{ g_sub_host_type }}"
- when: instances | length > 0 and type == "node"
-
-- set_fact:
- node_label:
- type: "{{ type }}"
- when: instances | length > 0 and type != "node"
-
-- name: Add new instances
- add_host:
- hostname: '{{ item.0 }}'
- ansible_ssh_host: '{{ item.1 }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
- openshift_node_labels: "{{ node_label }}"
- libvirt_ip_address: "{{ item.1 }}"
- with_together:
- - '{{ instances }}'
- - '{{ ips }}'
-
-- name: Wait for ssh
- wait_for:
- host: '{{ item }}'
- port: 22
- with_items: '{{ ips }}'
-
-- name: Wait for openshift user setup
- command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
- register: result
- until: result.rc == 0
- retries: 30
- delay: 1
- with_together:
- - '{{ instances }}'
- - '{{ ips }}'
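
Note on the drives created above: `vol-create-as --backing-vol` layers a copy-on-write qcow2 on top of the shared base cloud image, so each VM stores only its own deltas. A hedged sketch for inspecting the resulting backing chain; the task itself is illustrative and not from the deleted playbook, but the paths match the directory pool used above:

    - name: Inspect the backing chain of a cloned VM drive (illustrative)
      command: "qemu-img info {{ libvirt_storage_pool_path }}/{{ item }}.qcow2"
      register: qemu_img_info
      changed_when: false
      with_items: "{{ instances }}"

    - debug:
        msg: "{{ item.stdout_lines | select('match', '^backing file') | list }}"
      with_items: "{{ qemu_img_info.results }}"
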
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
deleted file mode 100644
index 88504a5f6..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<domain type='kvm' id='8'>
- <name>{{ item }}</name>
- <memory unit='MiB'>{{ libvirt_instance_memory_mib }}</memory>
- <metadata xmlns:ansible="https://github.com/ansible/ansible">
- <ansible:tags>
- <ansible:tag>environment-{{ cluster_env }}</ansible:tag>
- <ansible:tag>clusterid-{{ cluster }}</ansible:tag>
- <ansible:tag>host-type-{{ type }}</ansible:tag>
- <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
- </ansible:tags>
- </metadata>
- <vcpu placement='static'>{{ libvirt_instance_vcpu }}</vcpu>
- <os>
- <type arch='x86_64' machine='pc'>hvm</type>
- <boot dev='hd'/>
- </os>
- <features>
- <acpi/>
- <apic/>
- <pae/>
- </features>
- <cpu mode='host-model'>
- <model fallback='allow'/>
- </cpu>
- <clock offset='utc'>
- <timer name='rtc' tickpolicy='catchup'/>
- <timer name='pit' tickpolicy='delay'/>
- <timer name='hpet' present='no'/>
- </clock>
- <on_poweroff>destroy</on_poweroff>
- <on_reboot>restart</on_reboot>
- <on_crash>restart</on_crash>
- <devices>
- <emulator>/usr/bin/qemu-system-x86_64</emulator>
- <disk type='file' device='disk'>
- <driver name='qemu' type='qcow2' discard='unmap'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
- <target dev='sda' bus='scsi'/>
- </disk>
- <disk type='file' device='disk'>
- <driver name='qemu' type='qcow2' discard='unmap'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'/>
- <target dev='sdb' bus='scsi'/>
- </disk>
- <disk type='file' device='cdrom'>
- <driver name='qemu' type='raw'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
- <target dev='sdc' bus='scsi'/>
- <readonly/>
- </disk>
- <controller type='scsi' model='virtio-scsi' />
- <interface type='network'>
- <source network='{{ libvirt_network }}'/>
- <model type='virtio'/>
- </interface>
- <serial type='pty'>
- <target port='0'/>
- </serial>
- <console type='pty'>
- <target type='serial' port='0'/>
- </console>
- <memballoon model='virtio'>
- </memballoon>
- </devices>
-</domain>
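
Since this template is rendered per instance via `lookup('template', ...)`, rendering mistakes only surface when `virsh define` rejects the XML. A pre-flight sketch, assuming libvirt's `virt-xml-validate` tool is installed on the control host; the scratch path and task names are illustrative:

    - name: Render the domain XML to a scratch file (illustrative)
      template:
        src: ../templates/domain.xml
        dest: "/tmp/{{ item }}-domain.xml"
      with_items: "{{ instances }}"

    - name: Validate the rendered XML against the libvirt domain schema
      command: "virt-xml-validate /tmp/{{ item }}-domain.xml domain"
      changed_when: false
      with_items: "{{ instances }}"
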
diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data
deleted file mode 100644
index 6b421770d..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/meta-data
+++ /dev/null
@@ -1,3 +0,0 @@
-instance-id: {{ item[0] }}
-hostname: {{ item[0] }}
-local-hostname: {{ item[0] }}.example.com
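
These three keys are the minimum the cloud-init NoCloud datasource reads from the `cidata` ISO. For a hypothetical instance named `node-1`, the rendered file would read:

    instance-id: node-1
    hostname: node-1
    local-hostname: node-1.example.com
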
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
deleted file mode 100644
index 0ce2a8342..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<network>
- <name>{{ libvirt_network }}</name>
- <forward mode='nat'>
- <nat>
- <port start='1024' end='65535'/>
- </nat>
- </forward>
- <!-- TODO: query for first available virbr interface available -->
- <bridge name='virbr3' stp='on' delay='0'/>
- <!-- TODO: make overridable -->
- <domain name='example.com' localOnly='yes' />
- <dns>
- <!-- TODO: automatically add host entries -->
- </dns>
- <!-- TODO: query for available address space -->
- <ip address='192.168.55.1' netmask='255.255.255.0'>
- <dhcp>
- <range start='192.168.55.2' end='192.168.55.254'/>
- <!-- TODO: add static entries addresses for the hosts to be created -->
- </dhcp>
- </ip>
-</network>
-
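
The playbooks above drive libvirt networking through raw `virsh` calls; the same definition could be applied with the `virt_net` module shipped with Ansible 2.x. A sketch, not taken from the deleted playbooks:

    - name: Define the cluster network from the template (sketch)
      virt_net:
        name: "{{ libvirt_network }}"
        command: define
        xml: "{{ lookup('template', 'network.xml') }}"
        uri: "{{ libvirt_uri }}"

    - name: Activate the network
      virt_net:
        name: "{{ libvirt_network }}"
        state: active
        uri: "{{ libvirt_uri }}"
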
diff --git a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
deleted file mode 100644
index da139afd0..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<pool type='dir'>
- <name>{{ libvirt_storage_pool }}</name>
- <target>
- <path>{{ libvirt_storage_pool_path }}</path>
- </target>
-</pool>
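
Likewise, the directory pool could be defined and started with the `virt_pool` module instead of `virsh pool-*` commands; a hedged sketch under the same assumption that the Ansible 2.x libvirt modules are available:

    - name: Define the storage pool from the template (sketch)
      virt_pool:
        name: "{{ libvirt_storage_pool }}"
        command: define
        xml: "{{ lookup('template', 'storage-pool.xml') }}"
        uri: "{{ libvirt_uri }}"

    - name: Activate the storage pool
      virt_pool:
        name: "{{ libvirt_storage_pool }}"
        state: active
        uri: "{{ libvirt_uri }}"
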
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
deleted file mode 100644
index fbcf7c886..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ /dev/null
@@ -1,43 +0,0 @@
-#cloud-config
-disable_root: true
-
-hostname: {{ item[0] }}
-fqdn: {{ item[0] }}.example.com
-
-mounts:
-- [ sdb ]
-
-users:
- - default
- - name: root
- ssh_authorized_keys:
- - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
-
-system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
-ssh_authorized_keys:
- - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
-
-write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: '0440'
- content: |
- Defaults:openshift !requiretty
- - path: /etc/sysconfig/docker-storage-setup
- owner: root:root
- permissions: '0644'
- content: |
- DEVS=/dev/sdb
- VG=docker_vg
- EXTRA_DOCKER_STORAGE_OPTIONS='--storage-opt dm.blkdiscard=true'
- - path: /etc/systemd/system/fstrim.timer.d/hourly.conf
- content: |
- [Timer]
- OnCalendar=hourly
-
-runcmd:
- - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
- - systemctl enable --now fstrim.timer
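
The `mounts`, `write_files`, and `runcmd` sections above must parse as valid cloud-config or cloud-init silently skips them. A hedged check over the files rendered into the config-drive directories, assuming a cloud-init recent enough to ship the `devel schema` subcommand (≥ 18.2) on the host running the task:

    - name: Validate rendered user-data against the cloud-init schema (sketch)
      command: "cloud-init devel schema --config-file {{ libvirt_storage_pool_path }}/{{ item }}_configdrive/user-data"
      changed_when: false
      with_items: "{{ instances }}"
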
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
deleted file mode 100644
index 8a63d11a5..000000000
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-# TODO: does not handle a non-existent cluster gracefully
-
-- name: Terminate instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: cluster_group=tag_clusterid-{{ cluster_id }}
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: '{{ groups[cluster_group] | default([]) }}'
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- vars_files:
- - vars.yml
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- name: Terminate instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - name: Destroy VMs
- virt:
- name: '{{ item[0] }}'
- command: '{{ item[1] }}'
- uri: '{{ libvirt_uri }}'
- with_nested:
- - "{{ groups['oo_hosts_to_terminate'] }}"
- - [ destroy, undefine ]
-
- - name: Delete VM drives
- command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
- args:
- removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-
- - name: Delete VM docker drives
- command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}-docker.qcow2'
- args:
- removes: '{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-
- - name: Delete the VM cloud-init image
- file:
- path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
- state: absent
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-
- - name: Remove the cloud-init config directory
- file:
- path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
- state: absent
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
deleted file mode 100644
index a152135fc..000000000
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: '{{ g_all_hosts }}'
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: '{{ g_all_hosts | default([]) }}'
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
deleted file mode 100644
index 5156789e7..000000000
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-default_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
-libvirt_storage_pool_path: "{{ lookup('oo_option', 'libvirt_storage_pool_path') | default(default_pool_path, True) }}"
-libvirt_storage_pool: "{{ lookup('oo_option', 'libvirt_storage_pool') | default('openshift-ansible', True) }}"
-libvirt_network: "{{ lookup('oo_option', 'libvirt_network') | default('openshift-ansible', True) }}"
-libvirt_instance_memory_mib: "{{ lookup('oo_option', 'libvirt_instance_memory_mib') | default(1024, True) }}"
-libvirt_instance_vcpu: "{{ lookup('oo_option', 'libvirt_instance_vcpu') | default(2, True) }}"
-libvirt_uri: "{{ lookup('oo_option', 'libvirt_uri') | default('qemu:///system', True) }}"
-debug_level: 2
-
-# The RHEL qcow2 image cannot be downloaded automatically from the Red Hat portal because the portal requires authentication.
-# The default image_url for the enterprise and openshift-enterprise deployment types below therefore won't work.
-deployment_rhel7_ent_base:
- image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
- sha256: "{{ lookup('oo_option', 'image_sha256') |
- default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
- compression: ""
- ssh_user: openshift
- become: yes
-
-deployment_vars:
- origin:
- image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
- compression: "{{ lookup('oo_option', 'image_compression') |
- default('xz', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
- sha256: "{{ lookup('oo_option', 'image_sha256') |
- default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
- ssh_user: openshift
- become: yes
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
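
Note that the three enterprise entries are plain Jinja references to `deployment_rhel7_ent_base`, so they all resolve to the same image settings at runtime. A quick, illustrative way to see what the active deployment type resolves to:

    - name: Show the resolved image for the active deployment type (sketch)
      debug:
        msg: "{{ deployment_vars[deployment_type].image.name }} from {{ deployment_vars[deployment_type].image.url }}"
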
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
deleted file mode 100644
index a6d8d6995..000000000
--- a/playbooks/openstack/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# OpenStack playbooks
-
-This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
-which is only community supported; its use is considered deprecated.
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index 12c9fd442..000000000
--- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([])
- | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_compute'] | default([])) }}"
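
Every group here is built the same way: take the cluster-wide host list and intersect it with a host-type group, so hosts missing either tag drop out. The `intersect` filter in isolation, with made-up host names:

    - debug:
        msg: "{{ ['master-0', 'node-0', 'node-1'] | intersect(['node-0', 'node-1', 'node-9']) }}"
      # prints ['node-0', 'node-1']
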
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
deleted file mode 100644
index f9ddb9469..000000000
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_nodeonmaster: true
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
deleted file mode 100644
index 82329eac1..000000000
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ /dev/null
@@ -1,508 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: OpenShift cluster
-
-parameters:
-
- cluster_env:
- type: string
- label: Cluster environment
- description: Environment of the cluster
-
- cluster_id:
- type: string
- label: Cluster ID
- description: Identifier of the cluster
-
- subnet_24_prefix:
- type: string
- label: subnet /24 prefix
- description: /24 subnet prefix of the network of the cluster (dot-separated number triplet)
-
- dns_nameservers:
- type: comma_delimited_list
- label: DNS nameservers list
- description: List of DNS nameservers
-
- external_net:
- type: string
- label: External network
- description: Name of the external network
- default: external
-
- ssh_public_key:
- type: string
- label: SSH public key
- description: SSH public key
- hidden: true
-
- ssh_incoming:
- type: string
- label: Source of ssh connections
- description: Source of legitimate ssh connections
- default: 0.0.0.0/0
-
- node_port_incoming:
- type: string
- label: Source of node port connections
- description: Authorized sources targeting node ports
- default: 0.0.0.0/0
-
- num_etcd:
- type: number
- label: Number of etcd nodes
- description: Number of etcd nodes
-
- num_masters:
- type: number
- label: Number of masters
- description: Number of masters
-
- num_nodes:
- type: number
- label: Number of compute nodes
- description: Number of compute nodes
-
- num_infra:
- type: number
- label: Number of infrastructure nodes
- description: Number of infrastructure nodes
-
- etcd_image:
- type: string
- label: Etcd image
- description: Name of the image for the etcd servers
-
- master_image:
- type: string
- label: Master image
- description: Name of the image for the master servers
-
- node_image:
- type: string
- label: Node image
- description: Name of the image for the compute node servers
-
- infra_image:
- type: string
- label: Infra image
- description: Name of the image for the infra node servers
-
- etcd_flavor:
- type: string
- label: Etcd flavor
- description: Flavor of the etcd servers
-
- master_flavor:
- type: string
- label: Master flavor
- description: Flavor of the master servers
-
- node_flavor:
- type: string
- label: Node flavor
- description: Flavor of the compute node servers
-
- infra_flavor:
- type: string
- label: Infra flavor
- description: Flavor of the infra node servers
-
-outputs:
-
- etcd_names:
- description: Name of the etcds
- value: { get_attr: [ etcd, name ] }
-
- etcd_ips:
- description: IPs of the etcds
- value: { get_attr: [ etcd, private_ip ] }
-
- etcd_floating_ips:
- description: Floating IPs of the etcds
- value: { get_attr: [ etcd, floating_ip ] }
-
- master_names:
- description: Name of the masters
- value: { get_attr: [ masters, name ] }
-
- master_ips:
- description: IPs of the masters
- value: { get_attr: [ masters, private_ip ] }
-
- master_floating_ips:
- description: Floating IPs of the masters
- value: { get_attr: [ masters, floating_ip ] }
-
- node_names:
- description: Name of the nodes
- value: { get_attr: [ compute_nodes, name ] }
-
- node_ips:
- description: IPs of the nodes
- value: { get_attr: [ compute_nodes, private_ip ] }
-
- node_floating_ips:
- description: Floating IPs of the nodes
- value: { get_attr: [ compute_nodes, floating_ip ] }
-
- infra_names:
- description: Name of the infra nodes
- value: { get_attr: [ infra_nodes, name ] }
-
- infra_ips:
- description: IPs of the infra nodes
- value: { get_attr: [ infra_nodes, private_ip ] }
-
- infra_floating_ips:
- description: Floating IPs of the infra nodes
- value: { get_attr: [ infra_nodes, floating_ip ] }
-
-resources:
-
- net:
- type: OS::Neutron::Net
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
-
- subnet:
- type: OS::Neutron::Subnet
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-subnet
- params:
- cluster_id: { get_param: cluster_id }
- network: { get_resource: net }
- cidr:
- str_replace:
- template: subnet_24_prefix.0/24
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- dns_nameservers: { get_param: dns_nameservers }
-
- router:
- type: OS::Neutron::Router
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-router
- params:
- cluster_id: { get_param: cluster_id }
- external_gateway_info:
- network: { get_param: external_net }
-
- interface:
- type: OS::Neutron::RouterInterface
- properties:
- router_id: { get_resource: router }
- subnet_id: { get_resource: subnet }
-
- keypair:
- type: OS::Nova::KeyPair
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-keypair
- params:
- cluster_id: { get_param: cluster_id }
- public_key: { get_param: ssh_public_key }
-
- master-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-master-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id OpenShift cluster master
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: tcp
- port_range_min: 4001
- port_range_max: 4001
- - direction: ingress
- protocol: tcp
- port_range_min: 8443
- port_range_max: 8443
- - direction: ingress
- protocol: tcp
- port_range_min: 8444
- port_range_max: 8444
- - direction: ingress
- protocol: tcp
- port_range_min: 53
- port_range_max: 53
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- - direction: ingress
- protocol: tcp
- port_range_min: 8053
- port_range_max: 8053
- - direction: ingress
- protocol: udp
- port_range_min: 8053
- port_range_max: 8053
- - direction: ingress
- protocol: tcp
- port_range_min: 24224
- port_range_max: 24224
- - direction: ingress
- protocol: udp
- port_range_min: 24224
- port_range_max: 24224
- - direction: ingress
- protocol: tcp
- port_range_min: 2224
- port_range_max: 2224
- - direction: ingress
- protocol: udp
- port_range_min: 5404
- port_range_max: 5404
- - direction: ingress
- protocol: udp
- port_range_min: 5405
- port_range_max: 5405
- - direction: ingress
- protocol: tcp
- port_range_min: 9090
- port_range_max: 9090
-
- etcd-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-etcd-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id etcd cluster
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: tcp
- port_range_min: 2379
- port_range_max: 2379
- remote_mode: remote_group_id
- remote_group_id: { get_resource: master-secgrp }
- - direction: ingress
- protocol: tcp
- port_range_min: 2380
- port_range_max: 2380
- remote_mode: remote_group_id
-
- node-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-node-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id OpenShift cluster nodes
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: tcp
- port_range_min: 10250
- port_range_max: 10250
- remote_mode: remote_group_id
- - direction: ingress
- protocol: udp
- port_range_min: 4789
- port_range_max: 4789
- remote_mode: remote_group_id
- - direction: ingress
- protocol: tcp
- port_range_min: 30000
- port_range_max: 32767
- remote_ip_prefix: { get_param: node_port_incoming }
-
- infra-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-infra-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id OpenShift infrastructure cluster nodes
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 80
- port_range_max: 80
- - direction: ingress
- protocol: tcp
- port_range_min: 443
- port_range_max: 443
-
- etcd:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_etcd }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: etcd
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: etcd
- image: { get_param: etcd_image }
- flavor: { get_param: etcd_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: etcd-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
-
- masters:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_masters }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: master
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: master
- image: { get_param: master_image }
- flavor: { get_param: master_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: master-secgrp }
- - { get_resource: node-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
-
- compute_nodes:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_nodes }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-sub_host_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: node
- sub_host_type: compute
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: node
- subtype: compute
- image: { get_param: node_image }
- flavor: { get_param: node_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: node-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
-
- infra_nodes:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_infra }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-sub_host_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: node
- sub_host_type: infra
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: node
- subtype: infra
- image: { get_param: infra_image }
- flavor: { get_param: infra_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: node-secgrp }
- - { get_resource: infra-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
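
Each server tier above is an `OS::Heat::ResourceGroup` that stamps out `count` copies of the nested `heat_stack_server.yaml`, with `%index%` expanded per copy. A minimal standalone sketch of the same pattern; the image and flavor names are placeholders, not values from the deleted templates:

    heat_template_version: 2014-10-16
    description: Minimal ResourceGroup sketch
    resources:
      group:
        type: OS::Heat::ResourceGroup
        properties:
          count: 2
          resource_def:
            type: OS::Nova::Server
            properties:
              name: demo-%index%
              image: cirros        # placeholder image name
              flavor: m1.tiny      # placeholder flavor name
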
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
deleted file mode 100644
index 435139849..000000000
--- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: OpenShift cluster server
-
-parameters:
-
- name:
- type: string
- label: Name
- description: Name
-
- cluster_env:
- type: string
- label: Cluster environment
- description: Environment of the cluster
-
- cluster_id:
- type: string
- label: Cluster ID
- description: Identifier of the cluster
-
- type:
- type: string
- label: Type
- description: Type, either master or node
-
- subtype:
- type: string
- label: Sub-type
- description: Sub-type, compute or infra for nodes, default otherwise
- default: default
-
- key_name:
- type: string
- label: Key name
- description: Key name of keypair
-
- image:
- type: string
- label: Image
- description: Name of the image
-
- flavor:
- type: string
- label: Flavor
- description: Name of the flavor
-
- net:
- type: string
- label: Net ID
- description: Net resource
-
- net_name:
- type: string
- label: Net name
- description: Net name
-
- subnet:
- type: string
- label: Subnet ID
- description: Subnet resource
-
- secgrp:
- type: comma_delimited_list
- label: Security groups
- description: Security group resources
-
- floating_network:
- type: string
- label: Floating network
- description: Network to allocate floating IP from
-
-outputs:
-
- name:
- description: Name of the server
- value: { get_attr: [ server, name ] }
-
- private_ip:
- description: Private IP of the server
- value:
- get_attr:
- - server
- - addresses
- - { get_param: net_name }
- - 0
- - addr
-
- floating_ip:
- description: Floating IP of the server
- value:
- get_attr:
- - server
- - addresses
- - { get_param: net_name }
- - 1
- - addr
-
-resources:
-
- server:
- type: OS::Nova::Server
- properties:
- name: { get_param: name }
- key_name: { get_param: key_name }
- image: { get_param: image }
- flavor: { get_param: flavor }
- networks:
- - port: { get_resource: port }
- user_data: { get_resource: config }
- user_data_format: RAW
- metadata:
- environment: { get_param: cluster_env }
- clusterid: { get_param: cluster_id }
- host-type: { get_param: type }
- sub-host-type: { get_param: subtype }
-
- port:
- type: OS::Neutron::Port
- properties:
- network: { get_param: net }
- fixed_ips:
- - subnet: { get_param: subnet }
- security_groups: { get_param: secgrp }
-
- floating-ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network: { get_param: floating_network }
- port_id: { get_resource: port }
-
- config:
- type: OS::Heat::CloudConfig
- properties:
- cloud_config:
- disable_root: true
-
- hostname: { get_param: name }
-
- system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
- write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: '0440'
- # content: Defaults:openshift !requiretty
- # Encoded in base64 to make sure the trailing newline is preserved;
- # without it, sudo cannot parse the file
- encoding: b64
- content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==
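
The base64 trick above can be checked from Ansible itself; the `b64decode` filter shows the payload really does end in the newline sudo requires:

    - debug:
        msg: "{{ 'RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==' | b64decode }}"
      # prints "Defaults:openshift !requiretty" followed by a newline
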
diff --git a/playbooks/openstack/openshift-cluster/filter_plugins b/playbooks/openstack/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/openstack/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
deleted file mode 100644
index c0bc12f55..000000000
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,191 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- # TODO: Write an Ansible module for dealing with HEAT stacks
- # Dealing with the outputs is currently terrible
-
- - name: Check OpenStack stack
- command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
- register: stack_show_result
- changed_when: false
- failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
-
- - set_fact:
- heat_stack_action: 'stack-create'
- when: stack_show_result.rc == 1
- - set_fact:
- heat_stack_action: 'stack-update'
- when: stack_show_result.rc == 0
-
- - name: Create or Update OpenStack Stack
- command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
- --timeout {{ openstack_heat_timeout }}
- -P cluster_env={{ cluster_env }}
- -P cluster_id={{ cluster_id }}
- -P subnet_24_prefix={{ openstack_subnet_24_prefix }}
- -P dns_nameservers={{ openstack_network_dns | join(",") }}
- -P external_net={{ openstack_network_external_net }}
- -P ssh_public_key="{{ openstack_ssh_public_key }}"
- -P ssh_incoming={{ openstack_ssh_access_from }}
- -P node_port_incoming={{ openstack_node_port_access_from }}
- -P num_etcd={{ num_etcd }}
- -P num_masters={{ num_masters }}
- -P num_nodes={{ num_nodes }}
- -P num_infra={{ num_infra }}
- -P etcd_image={{ deployment_vars[deployment_type].image }}
- -P master_image={{ deployment_vars[deployment_type].image }}
- -P node_image={{ deployment_vars[deployment_type].image }}
- -P infra_image={{ deployment_vars[deployment_type].image }}
- -P etcd_flavor={{ openstack_flavor["etcd"] }}
- -P master_flavor={{ openstack_flavor["master"] }}
- -P node_flavor={{ openstack_flavor["node"] }}
- -P infra_flavor={{ openstack_flavor["infra"] }}
- openshift-ansible-{{ cluster_id }}-stack'
- args:
- chdir: '{{ playbook_dir }}'
-
- - name: Wait for OpenStack Stack readiness
- shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
- register: stack_show_status_result
- until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
- retries: 30
- delay: 5
-
- - name: Display the stack resources
- command: 'heat resource-list openshift-ansible-{{ cluster_id }}-stack'
- register: stack_resource_list_result
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - name: Display the stack status
- command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
- register: stack_show_result
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - name: Delete the stack
- command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack'
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - fail:
- msg: |
-
- +--------------------------------------+
- | ^ |
- | /!\ Failed to create the heat stack |
- | /___\ |
- +--------------------------------------+
-
- Here is the list of stack resources and their status:
- {{ stack_resource_list_result.stdout }}
-
- Here is the status of the stack:
- {{ stack_show_result.stdout }}
-
- ^ Failed to create the heat stack
- /!\
- /___\ Please check the `stack_status_reason` line in the output above to see why.
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - name: Read OpenStack Stack outputs
- command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
- register: stack_show_result
-
- - set_fact:
- parsed_outputs: "{{ stack_show_result | oo_parse_heat_stack_outputs }}"
-
- - name: Add new etcd instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_etcd, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "etcd"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.etcd_names }}'
- - '{{ parsed_outputs.etcd_ips }}'
- - '{{ parsed_outputs.etcd_floating_ips }}'
-
- - name: Add new master instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_master, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "master"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.master_names }}'
- - '{{ parsed_outputs.master_ips }}'
- - '{{ parsed_outputs.master_floating_ips }}'
-
- - name: Add new node instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_compute, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "compute"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.node_names }}'
- - '{{ parsed_outputs.node_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
-
- - name: Add new infra instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_infra, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "infra"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.infra_names }}'
- - '{{ parsed_outputs.infra_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
-
- - name: Wait for ssh
- wait_for:
- host: '{{ item }}'
- port: 22
- with_flattened:
- - '{{ parsed_outputs.master_floating_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
-
- - name: Wait for user setup
- command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
- register: result
- until: result.rc == 0
- retries: 30
- delay: 1
- with_flattened:
- - '{{ parsed_outputs.master_floating_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
-
-- include: update.yml
-
-- include: list.yml
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
deleted file mode 100644
index 6c6f671be..000000000
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=meta-clusterid_{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}"
- oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}"
- with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}"
diff --git a/playbooks/openstack/openshift-cluster/lookup_plugins b/playbooks/openstack/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/openstack/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openstack/openshift-cluster/roles b/playbooks/openstack/openshift-cluster/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/openstack/openshift-cluster/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
deleted file mode 100644
index affb57117..000000000
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- name: Terminate instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ (groups['meta-environment_' ~ cluster_env]|default([])) | intersect(groups['meta-clusterid_' ~ cluster_id ]|default([])) }}"
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- vars_files:
- - vars.yml
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - name: Delete the OpenStack Stack
- command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack'
- register: stack_delete_result
- changed_when: stack_delete_result.rc == 0
- failed_when: stack_delete_result.rc != 0 and 'could not be found' not in stack_delete_result.stdout
-
- - name: Wait for the completion of the OpenStack Stack deletion
- shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
- when: stack_delete_result.changed
- register: stack_show_result
- until: stack_show_result.stdout != 'DELETE_IN_PROGRESS'
- retries: 60
- delay: 5
- failed_when: '"Stack not found" not in stack_show_result.stderr and
- stack_show_result.stdout != "DELETE_COMPLETE"'
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
deleted file mode 100644
index 6d2af3d26..000000000
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
deleted file mode 100644
index ba2855b73..000000000
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-# yamllint disable rule:colons
----
-debug_level: 2
-openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) |
- default('files/heat_stack.yaml', True) }}"
-openstack_subnet_24_prefix: "{{ lookup('oo_option', 'subnet_24_prefix' ) |
- default('192.168.' + ( ( 1048576 | random % 256 ) | string() ), True) }}"
-openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) |
- default('external', True) }}"
-openstack_network_dns: "{{ lookup('oo_option', 'dns' ) |
- default('8.8.8.8,8.8.4.4', True) | oo_split() }}"
-openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') |
- default('~/.ssh/id_rsa.pub', True)) }}"
-openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
- default('0.0.0.0/0', True) }}"
-openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
- default('0.0.0.0/0', True) }}"
-openstack_heat_timeout: "{{ lookup('oo_option', 'heat_timeout') |
- default('3', True) }}"
-openstack_flavor:
- etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
- master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
- infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
- node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}"
-
-deployment_rhel7_ent_base:
- image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}"
- ssh_user: openshift
- become: yes
-
-deployment_vars:
- origin:
- image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
- ssh_user: openshift
- become: yes
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"