-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  README.md | 12
-rw-r--r--  README_AWS.md | 200
-rw-r--r--  README_GCE.md | 136
-rw-r--r--  README_libvirt.md | 163
-rw-r--r--  README_openstack.md | 87
-rw-r--r--  README_vagrant.md | 1
-rw-r--r--  bin/README.md | 6
-rwxr-xr-x  bin/cluster | 424
-rw-r--r--  docs/repo_structure.md | 6
-rw-r--r--  inventory/README.md | 6
-rw-r--r--  inventory/aws/hosts/ec2.ini | 189
-rwxr-xr-x  inventory/aws/hosts/ec2.py | 1511
-rw-r--r--  inventory/aws/hosts/hosts | 1
-rw-r--r--  inventory/byo/hosts.origin.example | 42
-rw-r--r--  inventory/byo/hosts.ose.example | 50
-rwxr-xr-x  inventory/gce/hosts/gce.py | 477
-rw-r--r--  inventory/gce/hosts/hosts | 1
-rw-r--r--  inventory/libvirt/hosts/hosts | 1
-rw-r--r--  inventory/libvirt/hosts/libvirt.ini | 20
-rwxr-xr-x  inventory/libvirt/hosts/libvirt_generic.py | 191
-rw-r--r--  inventory/openstack/hosts/hosts | 1
-rwxr-xr-x  inventory/openstack/hosts/openstack.py | 247
-rw-r--r--  openshift-ansible.spec | 37
-rw-r--r--  playbooks/README.md | 2
-rw-r--r--  playbooks/aws/README.md | 97
-rw-r--r--  playbooks/aws/openshift-cluster/add_nodes.yml | 35
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 34
-rw-r--r--  playbooks/aws/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 37
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 54
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 23
-rw-r--r--  playbooks/aws/openshift-cluster/scaleup.yml | 32
-rw-r--r--  playbooks/aws/openshift-cluster/service.yml | 31
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 188
-rw-r--r--  playbooks/aws/openshift-cluster/templates/user_data.j2 | 22
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 77
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 34
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 43
-rw-r--r--  playbooks/byo/openshift-master/scaleup.yml | 2
-rw-r--r--  playbooks/byo/openshift-node/scaleup.yml | 2
-rw-r--r--  playbooks/byo/vagrant.yml | 4
-rw-r--r--  playbooks/common/README.md | 7
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/initialize_firewall.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/restart.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml (renamed from playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 1
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 8
-rw-r--r--  playbooks/common/openshift-etcd/service.yml | 23
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 1
-rw-r--r--  playbooks/common/openshift-loadbalancer/service.yml | 23
-rw-r--r--  playbooks/common/openshift-master/config.yml | 1
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 17
-rw-r--r--  playbooks/common/openshift-master/service.yml | 23
-rw-r--r--  playbooks/common/openshift-nfs/service.yml | 21
-rw-r--r--  playbooks/common/openshift-node/config.yml | 2
-rw-r--r--  playbooks/common/openshift-node/scaleup.yml | 28
-rw-r--r--  playbooks/common/openshift-node/service.yml | 26
-rw-r--r--  playbooks/gce/README.md | 4
-rw-r--r--  playbooks/gce/openshift-cluster/add_nodes.yml | 43
-rw-r--r--  playbooks/gce/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 36
l---------  playbooks/gce/openshift-cluster/filter_plugins | 1
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 67
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 23
l---------  playbooks/gce/openshift-cluster/lookup_plugins | 1
l---------  playbooks/gce/openshift-cluster/roles | 1
-rw-r--r--  playbooks/gce/openshift-cluster/service.yml | 29
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 65
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 58
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 34
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml | 18
-rw-r--r--  playbooks/libvirt/README.md | 4
-rw-r--r--  playbooks/libvirt/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 39
l---------  playbooks/libvirt/openshift-cluster/filter_plugins | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 57
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 23
l---------  playbooks/libvirt/openshift-cluster/lookup_plugins | 1
l---------  playbooks/libvirt/openshift-cluster/roles | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/service.yml | 34
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 11
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 30
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 142
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/domain.xml | 65
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/meta-data | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/network.xml | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/storage-pool.xml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data | 43
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 70
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 37
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 40
-rw-r--r--  playbooks/openstack/README.md | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 33
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 508
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml | 152
l---------  playbooks/openstack/openshift-cluster/filter_plugins | 1
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 191
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 24
l---------  playbooks/openstack/openshift-cluster/lookup_plugins | 1
l---------  playbooks/openstack/openshift-cluster/roles | 1
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 49
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 34
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 38
-rw-r--r--  roles/calico/defaults/main.yaml | 2
-rw-r--r--  roles/calico_master/defaults/main.yaml | 2
-rw-r--r--  roles/docker/tasks/main.yml | 4
-rw-r--r--  roles/docker/templates/crio.conf.j2 | 2
-rw-r--r--  roles/etcd/defaults/main.yaml | 4
-rw-r--r--  roles/etcd_migrate/tasks/migrate.yml | 9
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 7
-rw-r--r--  roles/lib_openshift/src/class/oc_route.py | 7
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 2
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 5
-rw-r--r--  roles/nuage_node/vars/main.yaml | 2
-rw-r--r--  roles/openshift_ami_prep/defaults/main.yml | 50
-rw-r--r--  roles/openshift_ami_prep/tasks/main.yml | 42
-rw-r--r--  roles/openshift_ami_prep/tasks/yum_repos.yml | 14
-rw-r--r--  roles/openshift_cfme/defaults/main.yml | 1
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-db.yaml.j2 | 2
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-region.yaml.j2 | 2
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-server.yaml.j2 | 2
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 2
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 5
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 4
-rw-r--r--  roles/openshift_logging/tasks/annotate_ops_projects.yaml | 17
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 1
-rw-r--r--  roles/openshift_logging/vars/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_master/defaults/main.yml | 5
-rw-r--r--  roles/openshift_master/tasks/main.yml | 16
-rw-r--r--  roles/openshift_node/defaults/main.yml | 63
-rw-r--r--  roles/openshift_node/handlers/main.yml | 1
-rw-r--r--  roles/openshift_node/meta/main.yml | 1
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 55
-rw-r--r--  roles/openshift_node/tasks/config.yml | 111
-rw-r--r--  roles/openshift_node/tasks/install.yml | 33
-rw-r--r--  roles/openshift_node/tasks/main.yml | 189
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 2
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 46
-rw-r--r--  roles/openshift_node/templates/node.service.j2 | 2
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 2
-rw-r--r--  roles/openshift_node_certificates/defaults/main.yml | 1
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/main.yml | 11
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 27
-rw-r--r--  roles/openshift_repos/templates/yum_repo.j2 | 14
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 44
-rw-r--r--  roles/openshift_service_catalog/templates/api_server_service.j2 | 13
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager_service.j2 | 13
-rw-r--r--  roles/openshift_version/tasks/set_version_containerized.yml | 2
-rw-r--r--  setup.py | 31
-rw-r--r--  tox.ini | 1
178 files changed, 637 insertions, 7178 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 9af073db8..259ab3192 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.118.0 ./
+3.7.0-0.123.0 ./
diff --git a/README.md b/README.md
index 315c90063..021254527 100644
--- a/README.md
+++ b/README.md
@@ -67,14 +67,10 @@ you are not running a stable release.
dnf install -y ansible pyOpenSSL python-cryptography python-lxml
```
-2. Setup for a specific cloud:
-
- - [AWS](http://github.com/openshift/openshift-ansible/blob/master/README_AWS.md)
- - [GCE](http://github.com/openshift/openshift-ansible/blob/master/README_GCE.md)
- - [local VMs](http://github.com/openshift/openshift-ansible/blob/master/README_libvirt.md)
- - Bring your own host deployments:
- - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
- - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+2. OpenShift Installation Documentation:
+
+ - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
+ - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
## Containerized OpenShift Ansible
diff --git a/README_AWS.md b/README_AWS.md
deleted file mode 100644
index 650a921a4..000000000
--- a/README_AWS.md
+++ /dev/null
@@ -1,200 +0,0 @@
-:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/planning.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/planning.html) supported installation docs.
-
-AWS Setup Instructions
-======================
-
-Get AWS API credentials
------------------------
-1. [AWS credentials documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html)
-
-
-Create a credentials file
--------------------------
-1. Create a credentials file (e.g. ~/.aws_creds) that looks something like this (the variables must have these exact names).
-```
- export AWS_ACCESS_KEY_ID='AKIASTUFF'
- export AWS_SECRET_ACCESS_KEY='STUFF'
-```
-2. source this file
-```
- source ~/.aws_creds
-```
-Note: You must source this file before running any Ansible commands.
-
-Alternatively, you could configure credentials in either ~/.boto or ~/.aws/credentials, see the [boto docs](http://docs.pythonboto.org/en/latest/boto_config_tut.html) for the format.
-
-Subscribe to CentOS
--------------------
-
-1. [CentOS on AWS](https://aws.amazon.com/marketplace/pp/B00O7WM7QW)
-
-
-Set up Security Group
----------------------
-By default, a cluster is launched into the `public` security group. Make sure you allow hosts to talk to each other on port `4789` for SDN.
-You may also want to allow access from the outside world on the following ports:
-
-```
-• 22/TCP - ssh
-• 80/TCP - Web Apps
-• 443/TCP - Web Apps (https)
-• 4789/UDP - SDN / VXLAN
-• 8443/TCP - OpenShift Console
-• 10250/TCP - kubelet
-```
-
-
-Determine your subnet and setup the VPC
----------------------------------------
-
-In the AWS VPC console, look up the subnet ID for the region you want to use and export it as follows:
-
-- export ec2_vpc_subnet='my_vpc_subnet'
-
-Go to Your VPCs, select the VPC, and under Actions -> DNS Hostnames, set to Yes and Save.
-
-
-(Optional) Setup your $HOME/.ssh/config file
--------------------------------------------
-When creating a cluster, or in any other case where you don't know the machine hostnames in advance, you can use `.ssh/config`
-to point ansible at the private key file it should use to connect to the created hosts.
-
-To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key file that allows you to log in to AWS.
-```
-Host *.compute-1.amazonaws.com
- IdentityFile $HOME/.ssh/my_private_key.pem
-```
-
-Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.
-
-(Optional) Choose where the cluster will be launched
-----------------------------------------------------
-
-By default, a cluster is launched with the following configuration:
-
-- Instance type: m4.large
-- AMI: ami-7a9e9812 (for online deployments, ami-61bbf104 for origin deployments and ami-10663b78 for enterprise deployments)
-- Region: us-east-1
-- Keypair name: libra
-- Security group: public
-
-#### Master specific defaults:
-- Master root volume size: 10 (in GiBs)
-- Master root volume type: gp2
-- Master root volume iops: 500 (only applicable when volume type is io1)
-
-#### Node specific defaults:
-- Node root volume size: 10 (in GiBs)
-- Node root volume type: gp2
-- Node root volume iops: 500 (only applicable when volume type is io1)
-- Docker volume size: 25 (in GiBs)
-- Docker volume ephemeral: true (Whether the docker volume is ephemeral)
-- Docker volume type: gp2 (only applicable if ephemeral is false)
-- Docker volume iops: 500 (only applicable when volume type is io1)
-
-### Specifying ec2 instance type.
-
-#### All instances:
-
-- export ec2_instance_type='m4.large'
-
-#### Master instances:
-
-- export ec2_master_instance_type='m4.large'
-
-#### Infra node instances:
-
-- export ec2_infra_instance_type='m4.large'
-
-#### Non-infra node instances:
-
-- export ec2_node_instance_type='m4.large'
-
-#### etcd instances:
-
-- export ec2_etcd_instance_type='m4.large'
-
-If needed, these values can be changed by setting environment variables on your system.
-
-- export ec2_image='ami-307b3658'
-- export ec2_region='us-east-1'
-- export ec2_keypair='libra'
-- export ec2_security_groups="['public']"
-- export ec2_assign_public_ip='true'
-- export os_etcd_root_vol_size='20'
-- export os_etcd_root_vol_type='standard'
-- export os_etcd_vol_size='20'
-- export os_etcd_vol_type='standard'
-- export os_master_root_vol_size='20'
-- export os_master_root_vol_type='standard'
-- export os_node_root_vol_size='15'
-- export os_docker_vol_size='50'
-- export os_docker_vol_ephemeral='false'
-
-Install Dependencies
---------------------
-1. Ansible requires python-boto for aws operations:
-
-Fedora
-```
- dnf install -y ansible python-boto pyOpenSSL
-```
-
-RHEL/CentOS
-```
- yum install -y ansible python-boto pyOpenSSL
-```
-OSX:
-```
- pip install -U pyopenssl boto
-```
-
-
-Test The Setup
---------------
-1. cd openshift-ansible
-1. Try to list all instances (Passing an empty string as the cluster_id
-argument will result in all ec2 instances being listed)
-```
- bin/cluster list aws ''
-```
-
-Creating a cluster
-------------------
-1. To create a cluster with one master and two nodes
-```
- bin/cluster create aws <cluster-id>
-```
-
-Updating a cluster
----------------------
-1. To update the cluster
-```
- bin/cluster update aws <cluster-id>
-```
-
-Terminating a cluster
----------------------
-1. To terminate the cluster
-```
- bin/cluster terminate aws <cluster-id>
-```
-
-Specifying a deployment type
----------------------------
-The --deployment-type flag can be passed to bin/cluster to specify the deployment type.
-1. To launch an OpenShift Enterprise cluster (requires a valid subscription):
-```
- bin/cluster create aws --deployment-type=openshift-enterprise <cluster-id>
-```
-Note: If no deployment type is specified, then the default is origin.
-
-
-## Post-ansible steps
-
-You should now be ready to follow the **What's Next?** section of the advanced installation guide to deploy your router, registry, and other components.
-
-Refer to the advanced installation guide for your deployment type:
-
-* [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#what-s-next)
-* [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html#what-s-next)
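For context, the `bin/cluster list aws ''` workflow described in the removed README drove the `inventory/aws/hosts/ec2.py` dynamic inventory. A minimal sketch of the same idea using boto3 is shown below; the tag name and region are illustrative assumptions, not values taken from the removed playbooks:

```python
# Minimal sketch: list EC2 instances that carry a cluster-identifying tag.
# Assumes boto3 is installed and AWS credentials are available in the
# environment (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY), as described above.
import boto3

def list_cluster_instances(cluster_id, region="us-east-1"):
    ec2 = boto3.client("ec2", region_name=region)
    # Filter on a hypothetical tag key; the removed playbooks used their own
    # tagging scheme, which is not reproduced here.
    filters = [{"Name": "tag:openshift-cluster", "Values": [cluster_id]}] if cluster_id else []
    reservations = ec2.describe_instances(Filters=filters)["Reservations"]
    for reservation in reservations:
        for instance in reservation["Instances"]:
            print(instance["InstanceId"], instance.get("PublicDnsName", ""))

if __name__ == "__main__":
    list_cluster_instances("")  # an empty cluster_id lists everything, like `bin/cluster list aws ''`
```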
diff --git a/README_GCE.md b/README_GCE.md
deleted file mode 100644
index 99c8715de..000000000
--- a/README_GCE.md
+++ /dev/null
@@ -1,136 +0,0 @@
-:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs.
-
-GCE Setup Instructions
-======================
-
-Get a gce service key
----------------------
-1. Ask your GCE project administrator for a GCE service key
-
-Note: If your GCE project does not show a Service Account under <Project>/APIs & auth/Credentials, you will need to use "Create new Client ID" to create a Service Account before your administrator can create the service key for you.
-
-
-Convert a GCE service key into a pem (for ansible)
---------------------------------------------------
-1. mkdir -p ~/.gce
-1. The gce service key looks something like this: projectname-ef83bd90f261.p12
-   The ef83bd90f261 part is the public hash (GCE_KEY_HASH); the projectname part is the project name (PROJECT_NAME).
-1. Be in the same directory as the p12 key file.
-1. The commands below should be copy / paste-able
-1. Run these commands:
-```
- # Temporarily set hash variable and project name
- export GCE_KEY_HASH=ef83bd90f261
- export PROJECT_NAME=Project Name
- export PROJECT_ID=Project ID
-
- # Convert the service key (note: 'notasecret' is literally what we want here)
- openssl pkcs12 -in "${PROJECT_NAME}-${GCE_KEY_HASH}.p12" -passin pass:notasecret -nodes -nocerts | openssl rsa -out ${PROJECT_ID}-${GCE_KEY_HASH}.pem
-
- # Move the converted service key to the .gce dir
- mv ${PROJECT_ID}-${GCE_KEY_HASH}.pem ~/.gce
-```
-
-1. Once this is done, put the original service key file (projectname-ef83bd90f261.p12) somewhere safe, or delete it (your call, I do not know what else we will use it for, and we can always regen it if needed).
-
-
-Create a gce.ini file for GCE
---------------------------------
-* gce_service_account_email_address - Found in "APIs & auth" -> Credentials -> "Service Account" -> "Email Address"
-* gce_service_account_pem_file_path - Full path from previous steps
-* gce_project_id - Found in "Projects", which lists all the gce projects you are associated with. The page lists their "Project Name" and "Project ID"; you want the "Project ID".
-
-Mandatory customization variables (check the values according to your tenant):
-* zone = europe-west1-d
-* network = default
-
-Optional Variable Overrides:
-* gce_ssh_user - ssh user, defaults to the current logged in user
-* gce_machine_type = n1-standard-1 - default machine type
-* gce_machine_etcd_type = n1-standard-1 - machine type for etcd hosts
-* gce_machine_master_type = n1-standard-1 - machine type for master hosts
-* gce_machine_node_type = n1-standard-1 - machine type for node hosts
-* gce_machine_image = centos-7 - default image
-* gce_machine_etcd_image = centos-7 - image for etcd hosts
-* gce_machine_master_image = centos-7 - image for master hosts
-* gce_machine_node_image = centos-7 - image for node hosts
-
-
-1. vi ~/.gce/gce.ini
-1. make the contents look like this:
-```
-[gce]
-gce_service_account_email_address = long...@developer.gserviceaccount.com
-gce_service_account_pem_file_path = /full/path/to/project_id-gce_key_hash.pem
-gce_project_id = project_id
-zone = europe-west1-d
-network = default
-gce_machine_type = n1-standard-2
-gce_machine_master_type = n1-standard-1
-gce_machine_node_type = n1-standard-2
-gce_machine_image = centos-7
-gce_machine_master_image = centos-7
-gce_machine_node_image = centos-7
-
-```
-1. Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it
-```
-export GCE_INI_PATH=~/.gce/gce.ini
-```
-
-
-Install Dependencies
---------------------
-1. Ansible requires libcloud for gce operations:
-```
- yum install -y ansible python-libcloud
-```
-
-> Installation on Mac OS X requires the pycrypto library
->
-> <kbd>$ pip install pycrypto</kbd>
-
-Test The Setup
---------------
-1. cd openshift-ansible/
-1. Try to list all instances (Passing an empty string as the cluster_id
-argument will result in all gce instances being listed)
-```
- bin/cluster list gce ''
-```
-
-Creating a cluster
-------------------
-1. To create a cluster with one master, one infra node, and two compute nodes
-```
- bin/cluster create gce <cluster-id>
-```
-1. To create a cluster with 3 masters, 3 etcd hosts, 2 infra nodes and 10
-compute nodes
-```
- bin/cluster create gce -m 3 -e 3 -i 2 -n 10 <cluster-id>
-```
-
-Updating a cluster
----------------------
-1. To update the cluster
-```
- bin/cluster update gce <cluster-id>
-```
-
-Add additional nodes
----------------------
-1. To add additional infra nodes
-```
- bin/cluster add-nodes gce -i <num nodes> <cluster-id>
-```
-1. To add additional compute nodes
-```
- bin/cluster add-nodes gce -n <num nodes> <cluster-id>
-```
-Terminating a cluster
----------------------
-1. To terminate the cluster
-```
- bin/cluster terminate gce <cluster-id>
-```
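The removed `bin/cluster` wrapper read `~/.gce/gce.ini` and exported each key into the environment before invoking the `gce.py` dynamic inventory (see the `setup_provider` method further down in this diff). A rough Python 3 sketch of that step, assuming the same ini layout as the example above:

```python
# Sketch of how gce.ini values were pushed into the environment for gce.py.
# Python 3 equivalent of the ConfigParser logic in the removed bin/cluster.
import configparser
import os

def export_gce_ini(path=os.path.expanduser("~/.gce/gce.ini")):
    config = configparser.ConfigParser()
    if os.path.exists(path):
        config.read(path)
        for key in config.options("gce"):
            # gce.py expects these as environment variables, e.g.
            # gce_service_account_email_address, gce_project_id, zone, network.
            os.environ[key] = config.get("gce", key)

export_gce_ini()
```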
diff --git a/README_libvirt.md b/README_libvirt.md
deleted file mode 100644
index 1661681a0..000000000
--- a/README_libvirt.md
+++ /dev/null
@@ -1,163 +0,0 @@
-:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs.
-
-LIBVIRT Setup instructions
-==========================
-
-`libvirt` is an `openshift-ansible` provider that uses `libvirt` to create local Fedora VMs that are provisioned exactly the same way that cloud VMs would be provisioned.
-
-This makes `libvirt` useful to develop, test and debug OpenShift and openshift-ansible locally on the developer’s workstation before going to the cloud.
-
-Install dependencies
---------------------
-
-1. Install [ansible](http://www.ansible.com/)
-2. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
-3. Install [ebtables](http://ebtables.netfilter.org/)
-4. Install [qemu and qemu-system-x86](http://wiki.qemu.org/Main_Page)
-5. Install [libvirt-python and libvirt](http://libvirt.org/)
-6. Install [genisoimage](http://cdrkit.org/) or [mkisofs](http://cdrtools.sourceforge.net/private/cdrecord.html)
-7. Enable and start the libvirt daemon, e.g:
- - `systemctl enable libvirtd`
- - `systemctl start libvirtd`
-8. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
-9. Check that your `$HOME` is accessible to the qemu user²
-10. Configure dns resolution on the host³
-11. Install libselinux-python
-12. Ensure you have an SSH private and public keypair at `~/.ssh/id_rsa` and `~/.ssh/id_rsa.pub`⁴
-
-#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
-
-You can test it with the following command:
-
-```
-virsh -c qemu:///system pool-list
-```
-
-If you get access-denied error messages, please read https://libvirt.org/acl.html and https://libvirt.org/aclpolkit.html.
-
-In short, if your libvirt has been compiled with Polkit support (e.g. Arch, Fedora 21), you can create `/etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules` as follows to grant `$USER` full access to libvirt:
-
-```
-sudo /bin/sh -c "cat - > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules" << EOF
-polkit.addRule(function(action, subject) {
- if (action.id == "org.libvirt.unix.manage" &&
- subject.user == "$USER") {
-            polkit.log("action=" + action);
-            polkit.log("subject=" + subject);
-            return polkit.Result.YES;
- }
-});
-EOF
-```
-
-If your libvirt has not been compiled with Polkit (e.g. Ubuntu 14.04.1 LTS), check the permissions on the libvirt unix socket:
-
-```
-ls -l /var/run/libvirt/libvirt-sock
-srwxrwx--- 1 root libvirtd 0 févr. 12 16:03 /var/run/libvirt/libvirt-sock
-
-usermod -a -G libvirtd $USER
-# $USER needs to logout/login to have the new group be taken into account
-```
-
-(Replace `$USER` with your login name)
-
-#### ² Qemu will run with a specific user. It must have access to the VMs' drives
-
-All the disk drive resources needed by the VMs (Fedora disk image, cloud-init files) are put inside `~/libvirt-storage-pool-openshift/`.
-
-As we’re using the `qemu:///system` instance of libvirt, qemu will run with a specific `user:group` distinct from your user. It is configured in `/etc/libvirt/qemu.conf`. That qemu user must have access to that libvirt storage pool.
-
-If your `$HOME` is world readable, everything is fine. If your `$HOME` is private, `ansible` will fail with an error message like:
-
-```
-error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
-```
-
-In order to fix that issue, you have several possibilities:
- * set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
- * backed by a filesystem with a lot of free disk space
- * writable by your user;
- * accessible by the qemu user.
- * Grant the qemu user access to the storage pool.
-
-On Arch or Fedora 22+:
-
-```
-setfacl -m g:kvm:--x ~
-```
-
-#### ³ Enabling DNS resolution to your guest VMs with NetworkManager
-
-- Verify NetworkManager is configured to use dnsmasq:
-
-```sh
-$ sudo vi /etc/NetworkManager/NetworkManager.conf
-[main]
-dns=dnsmasq
-```
-
-- Configure dnsmasq to use the Virtual Network router for example.com:
-
-```sh
-sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
-server=/example.com/192.168.55.1
-```
-
-#### ⁴ Private and public keypair in ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub
-
-This playbook uses SSH keys to communicate with the libvirt-driven virtual machines. At this time the names of those keys are fixed and cannot be changed.
-
-
-Test The Setup
---------------
-
-1. cd openshift-ansible/
-2. Try to list all instances (Passing an empty string as the cluster_id argument will result in all libvirt instances being listed)
-
-```
- bin/cluster list libvirt ''
-```
-
-Configuration
--------------
-
-The following options can be passed via the `-o` flag of the `create` command or as environment variables:
-
-* `image_url` (defaults to `http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz`): URL of the QCOW2 image to download
-* `image_name` (defaults to `CentOS-7-x86_64-GenericCloud.qcow2`): Name of the QCOW2 image to boot the VMs on
-* `image_compression` (defaults to `xz`): Source QCOW2 compression (only xz supported at this time)
-* `image_sha256` (defaults to `dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471`): Expected SHA256 checksum of the downloaded image
-* `libvirt_storage_pool` (defaults to `openshift-ansible`): name of the libvirt storage pool for the VM images. It will be created if it does not exist
-* `libvirt_storage_pool_path` (defaults to `$HOME/libvirt-storage-pool-openshift-ansible`): path to `libvirt_storage_pool`, i.e. where the VM images are stored
-* `libvirt_network` (defaults to `openshift-ansible`): name of the libvirt network that the VMs will use. It will be created if it does not exist
-* `libvirt_instance_memory_mib` (defaults to `1024`): memory of the VMs in MiB
-* `libvirt_instance_vcpu` (defaults to `2`): number of vCPUs of the VMs
-* `skip_image_download` (defaults to `no`): Skip QCOW2 image download. This requires the `image_name` QCOW2 image to be already present in `$HOME/libvirt-storage-pool-openshift-ansible`
-
-Creating a cluster
-------------------
-
-1. To create a cluster with one master and two nodes
-
-```
- bin/cluster create libvirt lenaic
-```
-
-Updating a cluster
-------------------
-
-1. To update the cluster
-
-```
- bin/cluster update libvirt lenaic
-```
-
-Terminating a cluster
----------------------
-
-1. To terminate the cluster
-
-```
- bin/cluster terminate libvirt lenaic
-```
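The `virsh -c qemu:///system pool-list` access check described earlier in this removed README can also be done from Python with libvirt-python (one of the listed dependencies); a minimal sketch:

```python
# Minimal sketch: verify access to the qemu:///system libvirt instance and
# list storage pools, mirroring `virsh -c qemu:///system pool-list`.
import libvirt

conn = libvirt.open("qemu:///system")  # raises libvirt.libvirtError if access is denied
try:
    for pool in conn.listAllStoragePools():
        print(pool.name(), "active" if pool.isActive() else "inactive")
finally:
    conn.close()
```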
diff --git a/README_openstack.md b/README_openstack.md
deleted file mode 100644
index 2578488c7..000000000
--- a/README_openstack.md
+++ /dev/null
@@ -1,87 +0,0 @@
-:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs.
-
-OPENSTACK Setup instructions
-============================
-
-Requirements
-------------
-
-The OpenStack instance must have Neutron and Heat enabled.
-
-Install Dependencies
---------------------
-
-1. The OpenStack python clients for Nova, Neutron and Heat are required:
-
-* `python-novaclient`
-* `python-neutronclient`
-* `python-heatclient`
-
-On Fedora:
-```
- dnf install -y ansible python-novaclient python-neutronclient python-heatclient
-```
-
-On RHEL / CentOS:
-```
- yum install -y ansible python-novaclient python-neutronclient python-heatclient
- sudo pip install shade
-```
-
-Configuration
--------------
-
-The following options can be passed via the `-o` flag of the `create` command:
-
-* `infra_heat_stack` (defaults to `playbooks/openstack/openshift-cluster/files/heat_stack.yaml`): filename of the HEAT template to use to create the cluster infrastructure
-
-The following options are used only by `heat_stack.yaml`, and therefore apply only if the `infra_heat_stack` option is left at its default value.
-
-* `image_name`: Name of the image to use to spawn VMs
-* `public_key` (defaults to `~/.ssh/id_rsa.pub`): filename of the ssh public key
-* `etcd_flavor` (defaults to `m1.small`): The ID or name of the flavor for the etcd nodes
-* `master_flavor` (defaults to `m1.small`): The ID or name of the flavor for the master
-* `node_flavor` (defaults to `m1.medium`): The ID or name of the flavor for the compute nodes
-* `infra_flavor` (defaults to `m1.small`): The ID or name of the flavor for the infrastructure nodes
-* `network_prefix` (defaults to `openshift-ansible-<cluster_id>`): prefix prepended to all network objects (net, subnet, router, security groups)
-* `dns` (defaults to `8.8.8.8,8.8.4.4`): comma-separated list of DNS servers to use
-* `net_cidr` (defaults to `192.168.<rand()>.0/24`): CIDR of the network created by `heat_stack.yaml`
-* `external_net` (defaults to `external`): Name of the external network to connect to
-* `floating_ip_pool` (defaults to `external`): comma-separated list of floating IP pools
-* `ssh_from` (defaults to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh
-* `node_port_from` (defaults to `0.0.0.0/0`): IPs authorized to connect to the services exposed via nodePort
-* `heat_timeout` (defaults to `3`): Timeout (in minutes) passed to heat for stack create and update operations.
-
-
-Creating a cluster
-------------------
-
-1. To create a cluster with one master and two nodes
-
-```
- bin/cluster create openstack <cluster-id>
-```
-
-2. To create a cluster with one master and three nodes, a custom VM image and custom DNS:
-
-```
- bin/cluster create -n 3 -o image_name=rhel-7.1-openshift-2015.05.21 -o dns=172.16.50.210,172.16.50.250 openstack lenaic
-```
-
-Updating a cluster
-------------------
-
-1. To update the cluster
-
-```
- bin/cluster update openstack <cluster-id>
-```
-
-Terminating a cluster
----------------------
-
-1. To terminate the cluster
-
-```
- bin/cluster terminate openstack <cluster-id>
-```
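For reference, listing the servers behind `bin/cluster list openstack <cluster-id>` can be approximated with the openstacksdk (a successor to the `shade` library installed above). This is a hedged sketch rather than the removed playbooks' actual logic, which used a Heat stack and the `openstack.py` dynamic inventory:

```python
# Sketch: list Nova servers whose names carry a cluster-style prefix.
# The prefix is illustrative; the removed heat_stack.yaml applied its
# network_prefix ("openshift-ansible-<cluster_id>") to network objects.
# Assumes openstacksdk is installed and OS_* auth variables are set.
import openstack

def list_cluster_servers(cluster_id):
    conn = openstack.connect()  # reads OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, ...
    prefix = "openshift-ansible-{0}".format(cluster_id)
    for server in conn.compute.servers():
        if server.name.startswith(prefix):
            print(server.name, server.status)

list_cluster_servers("lenaic")
```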
diff --git a/README_vagrant.md b/README_vagrant.md
deleted file mode 100644
index cb62e31d8..000000000
--- a/README_vagrant.md
+++ /dev/null
@@ -1 +0,0 @@
-The Vagrant-based installation has been moved to: https://github.com/openshift/openshift-ansible-contrib/tree/master/vagrant
diff --git a/bin/README.md b/bin/README.md
deleted file mode 100644
index fec17cb9b..000000000
--- a/bin/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# The `bin/cluster` tool
-
-This tool was meant to be the entry point for managing OpenShift clusters,
-running against different "providers" (`aws`, `gce`, `libvirt`, `openstack`),
-though its use is now deprecated in favor of the [`byo`](../playbooks/byo)
-playbooks.
diff --git a/bin/cluster b/bin/cluster
deleted file mode 100755
index f77eb36ad..000000000
--- a/bin/cluster
+++ /dev/null
@@ -1,424 +0,0 @@
-#!/usr/bin/env python2
-
-import argparse
-import ConfigParser
-import os
-import sys
-import subprocess
-import traceback
-
-
-class Cluster(object):
- """
- Provide Command, Control and Configuration (c3) Interface for OpenShift Clusters
- """
-
- def __init__(self):
- # setup ansible ssh environment
- if 'ANSIBLE_SSH_ARGS' not in os.environ:
- os.environ['ANSIBLE_SSH_ARGS'] = (
- '-o ForwardAgent=yes '
- '-o StrictHostKeyChecking=no '
- '-o UserKnownHostsFile=/dev/null '
- '-o ControlMaster=auto '
- '-o ControlPersist=600s '
- )
- # Because of `UserKnownHostsFile=/dev/null`
- # our `.ssh/known_hosts` file most probably misses the ssh host public keys
- # of our servers.
- # In that case, ansible serializes the execution of ansible modules
- # because we might be interactively prompted to accept the ssh host public keys.
- # Because of `StrictHostKeyChecking=no` we know that we won't be prompted
- # So, we don't want our modules execution to be serialized.
- os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
- # TODO: A more secure way to proceed would consist in dynamically
- # retrieving the ssh host public keys from the IaaS interface
- if 'ANSIBLE_SSH_PIPELINING' not in os.environ:
- os.environ['ANSIBLE_SSH_PIPELINING'] = 'True'
-
- def get_deployment_type(self, args):
- """
- Get the deployment_type based on the environment variables and the
- command line arguments
- :param args: command line arguments provided by the user
- :return: string representing the deployment type
- """
- deployment_type = 'origin'
- if args.deployment_type:
- deployment_type = args.deployment_type
- elif 'OS_DEPLOYMENT_TYPE' in os.environ:
- deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
- return deployment_type
-
-
- def create(self, args):
- """
- Create an OpenShift cluster for given provider
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- cluster['num_masters'] = args.masters
- cluster['num_nodes'] = args.nodes
- cluster['num_infra'] = args.infra
- cluster['num_etcd'] = args.etcd
- cluster['cluster_env'] = args.env
-
- if args.cloudprovider and args.provider == 'openstack':
- cluster['openshift_cloudprovider_kind'] = 'openstack'
- cluster['openshift_cloudprovider_openstack_auth_url'] = os.getenv('OS_AUTH_URL')
- cluster['openshift_cloudprovider_openstack_username'] = os.getenv('OS_USERNAME')
- cluster['openshift_cloudprovider_openstack_password'] = os.getenv('OS_PASSWORD')
- if 'OS_USER_DOMAIN_ID' in os.environ:
- cluster['openshift_cloudprovider_openstack_domain_id'] = os.getenv('OS_USER_DOMAIN_ID')
- if 'OS_USER_DOMAIN_NAME' in os.environ:
- cluster['openshift_cloudprovider_openstack_domain_name'] = os.getenv('OS_USER_DOMAIN_NAME')
- if 'OS_PROJECT_ID' in os.environ or 'OS_TENANT_ID' in os.environ:
- cluster['openshift_cloudprovider_openstack_tenant_id'] = os.getenv('OS_PROJECT_ID',os.getenv('OS_TENANT_ID'))
-        if 'OS_PROJECT_NAME' in os.environ or 'OS_TENANT_NAME' in os.environ:
- cluster['openshift_cloudprovider_openstack_tenant_name'] = os.getenv('OS_PROJECT_NAME',os.getenv('OS_TENANT_NAME'))
- if 'OS_REGION_NAME' in os.environ:
- cluster['openshift_cloudprovider_openstack_region'] = os.getenv('OS_REGION_NAME')
-
- self.action(args, inventory, cluster, playbook)
-
- def add_nodes(self, args):
- """
- Add nodes to an existing cluster for given provider
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- }
- playbook = "playbooks/{0}/openshift-cluster/add_nodes.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- cluster['num_nodes'] = args.nodes
- cluster['num_infra'] = args.infra
- cluster['cluster_env'] = args.env
-
- self.action(args, inventory, cluster, playbook)
-
- def terminate(self, args):
- """
- Destroy OpenShift cluster
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- 'cluster_env': args.env,
- }
- playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- self.action(args, inventory, cluster, playbook)
-
- def list(self, args):
- """
- List VMs in cluster
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- 'cluster_env': args.env,
- }
- playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- self.action(args, inventory, cluster, playbook)
-
- def config(self, args):
- """
- Configure or reconfigure OpenShift across clustered VMs
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- 'cluster_env': args.env,
- }
- playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- self.action(args, inventory, cluster, playbook)
-
- def update(self, args):
- """
- Update to latest OpenShift across clustered VMs
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- 'cluster_env': args.env,
- }
-
- playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- self.action(args, inventory, cluster, playbook)
-
- def service(self, args):
- """
- Make the same service call across all nodes in the cluster
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- 'new_cluster_state': args.state,
- 'cluster_env': args.env,
- }
-
- playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- self.action(args, inventory, cluster, playbook)
-
- def setup_provider(self, provider):
- """
- Setup ansible playbook environment
- :param provider: command line arguments provided by user
- :return: path to inventory for given provider
- """
- config = ConfigParser.ConfigParser()
- if 'gce' == provider:
- gce_ini_default_path = os.path.join('inventory/gce/hosts/gce.ini')
- gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
- if os.path.exists(gce_ini_path):
- config.readfp(open(gce_ini_path))
-
- for key in config.options('gce'):
- os.environ[key] = config.get('gce', key)
-
- inventory = '-i inventory/gce/hosts'
- elif 'aws' == provider:
- config.readfp(open('inventory/aws/hosts/ec2.ini'))
-
- for key in config.options('ec2'):
- os.environ[key] = config.get('ec2', key)
-
- inventory = '-i inventory/aws/hosts'
-
- key_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
- key_missing = [key for key in key_vars if key not in os.environ]
-
- boto_conf_files = ['~/.aws/credentials', '~/.boto']
- conf_exists = lambda conf: os.path.isfile(os.path.expanduser(conf))
- boto_configs = [conf for conf in boto_conf_files if conf_exists(conf)]
-
- if len(key_missing) > 0 and len(boto_configs) == 0:
- raise ValueError("PROVIDER aws requires {0} environment variable(s). See README_AWS.md".format(key_missing))
-
- elif 'libvirt' == provider:
- inventory = '-i inventory/libvirt/hosts'
- elif 'openstack' == provider:
- inventory = '-i inventory/openstack/hosts'
- else:
- # this code should never be reached
- raise ValueError("invalid PROVIDER {0}".format(provider))
-
- return inventory
-
- def action(self, args, inventory, cluster, playbook):
- """
- Build ansible-playbook command line and execute
- :param args: command line arguments provided by user
- :param inventory: derived provider library
- :param cluster: cluster variables for kubernetes
- :param playbook: ansible playbook to execute
- """
-
- verbose = ''
- if args.verbose > 0:
- verbose = '-{0}'.format('v' * args.verbose)
-
- if args.option:
- for opt in args.option:
- k, v = opt.split('=', 1)
- cluster['cli_' + k] = v
-
- ansible_extra_vars = '-e \'{0}\''.format(
- ' '.join(['%s=%s' % (key, value) for (key, value) in cluster.items()])
- )
-
- command = 'ansible-playbook {0} {1} {2} {3}'.format(
- verbose, inventory, ansible_extra_vars, playbook
- )
-
- if args.profile:
- command = 'ANSIBLE_CALLBACK_PLUGINS=ansible-profile/callback_plugins ' + command
-
- if args.verbose > 1:
- command = 'time {0}'.format(command)
-
- if args.verbose > 0:
- sys.stderr.write('RUN [{0}]\n'.format(command))
- sys.stderr.flush()
-
- try:
- subprocess.check_call(command, shell=True)
- except subprocess.CalledProcessError as exc:
- raise ActionFailed("ACTION [{0}] failed: {1}"
- .format(args.action, exc))
-
-
-class ActionFailed(Exception):
- """
- Raised when action failed.
- """
- pass
-
-
-if __name__ == '__main__':
- """
- User command to invoke ansible playbooks in a "known" configuration
-
- Reads ~/.openshift-ansible for default configuration items
- [DEFAULT]
- validate_cluster_ids = False
- cluster_ids = marketing,sales
- providers = gce,aws,libvirt,openstack
- """
-
- warning = ("================================================================================\n"
- "ATTENTION: You are running a community supported utility that has not been\n"
- "tested by Red Hat. Visit https://docs.openshift.com for supported installation\n"
- "instructions.\n"
- "================================================================================\n\n")
- sys.stderr.write(warning)
-
- cluster_config = ConfigParser.SafeConfigParser({
- 'cluster_ids': 'marketing,sales',
- 'validate_cluster_ids': 'False',
- 'providers': 'gce,aws,libvirt,openstack',
- })
-
- path = os.path.expanduser("~/.openshift-ansible")
- if os.path.isfile(path):
- cluster_config.read(path)
-
- cluster = Cluster()
-
- parser = argparse.ArgumentParser(
- formatter_class=argparse.RawDescriptionHelpFormatter,
- description='Python wrapper to ensure proper configuration for OpenShift ansible playbooks',
- epilog='''\
-This wrapper overrides the following ansible variables:
-
- * ANSIBLE_SSH_ARGS:
- If not set in the environment, this wrapper will use the following value:
- `-o ForwardAgent=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=600s`
- If set in the environment, the environment variable value is left untouched and used.
-
- * ANSIBLE_SSH_PIPELINING:
- If not set in the environment, this wrapper will set it to `True`.
- If you experience issues with Ansible SSH pipelining, you can disable it by explicitly setting this environment variable to `False`.
-'''
- )
- parser.add_argument('-v', '--verbose', action='count',
- help='Multiple -v options increase the verbosity')
- parser.add_argument('--version', action='version', version='%(prog)s 0.3')
-
- meta_parser = argparse.ArgumentParser(add_help=False)
- providers = cluster_config.get('DEFAULT', 'providers').split(',')
- meta_parser.add_argument('provider', choices=providers, help='provider')
-
- if cluster_config.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"):
- meta_parser.add_argument('cluster_id', choices=cluster_config.get('DEFAULT', 'cluster_ids').split(','),
- help='prefix for cluster VM names')
- else:
- meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
-
- meta_parser.add_argument('-t', '--deployment-type',
- choices=['origin', 'atomic-enterprise', 'openshift-enterprise'],
- help='Deployment type. (default: origin)')
- meta_parser.add_argument('-o', '--option', action='append',
- help='options')
-
- meta_parser.add_argument('--env', default='dev', type=str,
- help='environment for the cluster. Defaults to \'dev\'.')
-
- meta_parser.add_argument('-p', '--profile', action='store_true',
- help='Enable playbook profiling')
-
- action_parser = parser.add_subparsers(dest='action', title='actions',
- description='Choose from valid actions')
-
- create_parser = action_parser.add_parser('create', help='Create a cluster',
- parents=[meta_parser])
- create_parser.add_argument('-c', '--cloudprovider', action='store_true',
- help='Enable the cloudprovider')
- create_parser.add_argument('-m', '--masters', default=1, type=int,
- help='number of masters to create in cluster')
- create_parser.add_argument('-n', '--nodes', default=2, type=int,
- help='number of nodes to create in cluster')
- create_parser.add_argument('-i', '--infra', default=1, type=int,
- help='number of infra nodes to create in cluster')
- create_parser.add_argument('-e', '--etcd', default=0, type=int,
- help='number of external etcd hosts to create in cluster')
- create_parser.set_defaults(func=cluster.create)
-
-
- create_parser = action_parser.add_parser('add-nodes', help='Add nodes to a cluster',
- parents=[meta_parser])
- create_parser.add_argument('-n', '--nodes', default=1, type=int,
- help='number of nodes to add to the cluster')
- create_parser.add_argument('-i', '--infra', default=1, type=int,
- help='number of infra nodes to add to the cluster')
- create_parser.set_defaults(func=cluster.add_nodes)
-
-
- config_parser = action_parser.add_parser('config',
- help='Configure or reconfigure a cluster',
- parents=[meta_parser])
- config_parser.set_defaults(func=cluster.config)
-
- terminate_parser = action_parser.add_parser('terminate',
- help='Destroy a cluster',
- parents=[meta_parser])
- terminate_parser.add_argument('-f', '--force', action='store_true',
- help='Destroy cluster without confirmation')
- terminate_parser.set_defaults(func=cluster.terminate)
-
- update_parser = action_parser.add_parser('update',
- help='Update OpenShift across cluster',
- parents=[meta_parser])
- update_parser.add_argument('-f', '--force', action='store_true',
- help='Update cluster without confirmation')
- update_parser.set_defaults(func=cluster.update)
-
- list_parser = action_parser.add_parser('list', help='List VMs in cluster',
- parents=[meta_parser])
- list_parser.set_defaults(func=cluster.list)
-
- service_parser = action_parser.add_parser('service', help='service for openshift across cluster',
- parents=[meta_parser])
- # choices are the only ones valid for the ansible service module: http://docs.ansible.com/service_module.html
- service_parser.add_argument('state', choices=['started', 'stopped', 'restarted', 'reloaded'],
- help='make service call across cluster')
- service_parser.set_defaults(func=cluster.service)
-
- args = parser.parse_args()
-
- if 'terminate' == args.action and not args.force:
- answer = raw_input("This will destroy the ENTIRE {0} cluster. Are you sure? [y/N] ".format(args.cluster_id))
- if answer not in ['y', 'Y']:
- sys.stderr.write('\nACTION [terminate] aborted by user!\n')
- exit(1)
-
- if 'update' == args.action and not args.force:
- answer = raw_input(
- "This is destructive and could corrupt {0} cluster. Continue? [y/N] ".format(args.cluster_id))
- if answer not in ['y', 'Y']:
- sys.stderr.write('\nACTION [update] aborted by user!\n')
- exit(1)
-
- try:
- args.func(args)
- except Exception as exc:
- if args.verbose:
- traceback.print_exc(file=sys.stderr)
- else:
- print >>sys.stderr, exc
- exit(1)
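The `-o key=value` options referenced by the removed provider READMEs were turned into `cli_`-prefixed Ansible extra vars by the `action()` method above. A condensed, standalone sketch of that convention (helper name and quoting are illustrative, not the exact removed code):

```python
# Condensed sketch of the option handling removed with bin/cluster:
# each "-o key=value" became an extra var named "cli_<key>" on the
# ansible-playbook command line.
import shlex

def build_extra_vars(cluster, options):
    cluster = dict(cluster)
    for opt in options or []:
        key, value = opt.split("=", 1)
        cluster["cli_" + key] = value
    pairs = " ".join("{0}={1}".format(k, v) for k, v in cluster.items())
    return "-e {0}".format(shlex.quote(pairs))

print(build_extra_vars({"cluster_id": "lenaic", "deployment_type": "origin"},
                       ["image_name=CentOS-7-x86_64-GenericCloud.qcow2"]))
```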
diff --git a/docs/repo_structure.md b/docs/repo_structure.md
index f598f22c3..49300f80c 100644
--- a/docs/repo_structure.md
+++ b/docs/repo_structure.md
@@ -28,12 +28,6 @@ These are plugins used in playbooks and roles:
```
.
-├── bin [DEPRECATED] Contains the `bin/cluster` script, a
-│ wrapper around the Ansible playbooks that ensures proper
-│ configuration, and facilitates installing, updating,
-│ destroying and configuring OpenShift clusters.
-│ Note: this tool is kept in the repository for legacy
-│ reasons and will be removed at some point.
└── utils Contains the `atomic-openshift-installer` command, an
interactive CLI utility to install OpenShift across a
set of hosts.
diff --git a/inventory/README.md b/inventory/README.md
index b61bfff18..5e26e3c32 100644
--- a/inventory/README.md
+++ b/inventory/README.md
@@ -2,8 +2,4 @@
You can install OpenShift on:
-* [Amazon Web Services](aws/hosts/)
-* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your bare metal servers
-* [GCE](gce/) (Google Compute Engine)
-* [libvirt](libvirt/hosts/)
-* [OpenStack](openstack/hosts/)
+* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your pre-existing hosts
diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini
deleted file mode 100644
index 64c097d47..000000000
--- a/inventory/aws/hosts/ec2.ini
+++ /dev/null
@@ -1,189 +0,0 @@
-# Ansible EC2 external inventory script settings
-#
-
-[ec2]
-
-# To talk to a private eucalyptus instance, uncomment these lines
-# and edit eucalyptus_host to be the host name of your cloud controller
-#eucalyptus = True
-#eucalyptus_host = clc.cloud.domain.org
-
-# AWS regions to make calls to. Set this to 'all' to make request to all regions
-# in AWS and merge the results together. Alternatively, set this to a comma
-# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
-regions = all
-regions_exclude = us-gov-west-1,cn-north-1
-
-# When generating inventory, Ansible needs to know how to address a server.
-# Each EC2 instance has a lot of variables associated with it. Here is the list:
-# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
-# Below are 2 variables that are used as the address of a server:
-# - destination_variable
-# - vpc_destination_variable
-
-# This is the normal destination variable to use. If you are running Ansible
-# from outside EC2, then 'public_dns_name' makes the most sense. If you are
-# running Ansible from within EC2, then perhaps you want to use the internal
-# address, and should set this to 'private_dns_name'. The key of an EC2 tag
-# may optionally be used; however the boto instance variables hold precedence
-# in the event of a collision.
-destination_variable = public_dns_name
-
-# This allows you to override the inventory_name with an ec2 variable, instead
-# of using the destination_variable above. Addressing (aka ansible_ssh_host)
-# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
-hostname_variable = tag_Name
-
-# For servers inside a VPC, using DNS names may not make sense. When an instance
-# has 'subnet_id' set, this variable is used. If the subnet is public, setting
-# this to 'ip_address' will return the public IP address. For instances in a
-# private subnet, this should be set to 'private_ip_address', and Ansible must
-# be run from within EC2. The key of an EC2 tag may optionally be used; however
-# the boto instance variables hold precedence in the event of a collision.
-# WARNING: instances that are in a private VPC _without_ a public IP address
-# will not be listed in the inventory until you set:
-# vpc_destination_variable = private_ip_address
-vpc_destination_variable = ip_address
-
-# The following two settings allow flexible ansible host naming based on a
-# python format string and a comma-separated list of ec2 tags. Note that:
-#
-# 1) If the tags referenced are not present for some instances, empty strings
-# will be substituted in the format string.
-# 2) This overrides both destination_variable and vpc_destination_variable.
-#
-#destination_format = {0}.{1}.example.com
-#destination_format_tags = Name,environment
-
-# To tag instances on EC2 with the resource records that point to them from
-# Route53, uncomment and set 'route53' to True.
-route53 = False
-
-# To exclude RDS instances from the inventory, uncomment and set to False.
-rds = False
-
-# To exclude ElastiCache instances from the inventory, uncomment and set to False.
-elasticache = False
-
-# Additionally, you can specify the list of zones to exclude looking up in
-# 'route53_excluded_zones' as a comma-separated list.
-# route53_excluded_zones = samplezone1.com, samplezone2.com
-
-# By default, only EC2 instances in the 'running' state are returned. Set
-# 'all_instances' to True to return all instances regardless of state.
-all_instances = False
-
-# By default, only EC2 instances in the 'running' state are returned. Specify
-# EC2 instance states to return as a comma-separated list. This
-# option is overridden when 'all_instances' is True.
-# instance_states = pending, running, shutting-down, terminated, stopping, stopped
-
-# By default, only RDS instances in the 'available' state are returned. Set
-# 'all_rds_instances' to True to return all RDS instances regardless of state.
-all_rds_instances = False
-
-# Include RDS cluster information (Aurora etc.)
-include_rds_clusters = False
-
-# By default, only ElastiCache clusters and nodes in the 'available' state
-# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
-# to True to return all ElastiCache clusters and nodes, regardless of state.
-#
-# Note that all_elasticache_nodes only applies to listed clusters. That means
-# if you set all_elasticache_clusters to False, no node will be returned from
-# unavailable clusters, regardless of their state and of what you set for
-# all_elasticache_nodes.
-all_elasticache_replication_groups = False
-all_elasticache_clusters = False
-all_elasticache_nodes = False
-
-# API calls to EC2 are slow. For this reason, we cache the results of an API
-# call. Set this to the path you want cache files to be written to. Two files
-# will be written to this directory:
-# - ansible-ec2.cache
-# - ansible-ec2.index
-cache_path = ~/.ansible/tmp
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-# To disable the cache, set this value to 0
-cache_max_age = 300
-
-# Organize groups into a nested hierarchy instead of a flat namespace.
-nested_groups = False
-
-# Replace dashes ('-') with underscores when creating groups to avoid issues with Ansible.
-replace_dash_in_groups = False
-
-# If set to true, any tag of the form "a,b,c" is expanded into a list
-# and the results are used to create additional tag_* inventory groups.
-expand_csv_tags = False
-
-# The EC2 inventory output can become very large. To manage its size,
-# configure which groups should be created.
-group_by_instance_id = True
-group_by_region = True
-group_by_availability_zone = True
-group_by_ami_id = True
-group_by_instance_type = True
-group_by_key_pair = True
-group_by_vpc_id = True
-group_by_security_group = True
-group_by_tag_keys = True
-group_by_tag_none = True
-group_by_route53_names = True
-group_by_rds_engine = True
-group_by_rds_parameter_group = True
-group_by_elasticache_engine = True
-group_by_elasticache_cluster = True
-group_by_elasticache_parameter_group = True
-group_by_elasticache_replication_group = True
-
-# If you only want to include hosts that match a certain regular expression
-# pattern_include = staging-*
-
-# If you want to exclude any hosts that match a certain regular expression
-# pattern_exclude = staging-*
-
-# Instance filters can be used to control which instances are retrieved for
-# inventory. For the full list of possible filters, please read the EC2 API
-# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
-# Filters are key/value pairs separated by '='; to apply multiple filters,
-# separate them with commas. See examples below.
-
-# Retrieve only instances with (key=value) env=staging tag
-# instance_filters = tag:env=staging
-
-# Retrieve only instances with role=webservers OR role=dbservers tag
-# instance_filters = tag:role=webservers,tag:role=dbservers
-
-# Retrieve only t1.micro instances OR instances with tag env=staging
-# instance_filters = instance-type=t1.micro,tag:env=staging
-
-# You can also use wildcards in filter values. The example below will list
-# instances whose tag Name value matches webservers1*
-# (e.g. webservers15, webservers1a, webservers123)
-# instance_filters = tag:Name=webservers1*
-
-# A boto configuration profile may be used to separate out credentials;
-# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
-# boto_profile = some-boto-profile-name
-
-
-[credentials]
-
-# The AWS credentials can optionally be specified here. Credentials specified
-# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
-# AWS_PROFILE is set, or if the boto_profile property above is set.
-#
-# Supplying AWS credentials here is not recommended, as it introduces
-# non-trivial security concerns. When going down this route, please make sure
-# to set access permissions for this file correctly, e.g. handle it the same
-# way as you would a private SSH key.
-#
-# Unlike the boto and AWS configure files, this section does not support
-# profiles.
-#
-# aws_access_key_id = AXXXXXXXXXXXXXX
-# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
-# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
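As a quick illustration of how the destination_format / destination_format_tags settings in the ec2.ini removed above were consumed, the script simply applied a Python str.format() call over the listed tag values. This is a minimal sketch only; the tag names and values are invented examples, not taken from any real inventory:

    # Minimal sketch of how ec2.py resolves destination_format (example values only).
    tags = {'Name': 'web01', 'environment': 'prod'}
    destination_format = '{0}.{1}.example.com'
    destination_format_tags = ['Name', 'environment']
    # Missing tags are substituted as empty strings, matching the script's behavior.
    dest = destination_format.format(*[tags.get(t, '') for t in destination_format_tags])
    # dest == 'web01.prod.example.com'
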
diff --git a/inventory/aws/hosts/ec2.py b/inventory/aws/hosts/ec2.py
deleted file mode 100755
index b71458a29..000000000
--- a/inventory/aws/hosts/ec2.py
+++ /dev/null
@@ -1,1511 +0,0 @@
-#!/usr/bin/env python2
-# pylint: skip-file
-
-'''
-EC2 external inventory script
-=================================
-
-Generates inventory that Ansible can understand by making API requests to
-AWS EC2 using the Boto library.
-
-NOTE: This script assumes Ansible is being executed where the environment
-variables needed for Boto have already been set:
- export AWS_ACCESS_KEY_ID='AK123'
- export AWS_SECRET_ACCESS_KEY='abc123'
-
-This script also assumes there is an ec2.ini file alongside it. To specify a
-different path to ec2.ini, define the EC2_INI_PATH environment variable:
-
- export EC2_INI_PATH=/path/to/my_ec2.ini
-
-If you're using Eucalyptus, you need to set the above variables and
-also define:
-
- export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
-
-If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
-using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
-the AWS_PROFILE variable:
-
- AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
-
-For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
-
-When run against a specific host, this script returns the following variables:
- - ec2_ami_launch_index
- - ec2_architecture
- - ec2_association
- - ec2_attachTime
- - ec2_attachment
- - ec2_attachmentId
- - ec2_block_devices
- - ec2_client_token
- - ec2_deleteOnTermination
- - ec2_description
- - ec2_deviceIndex
- - ec2_dns_name
- - ec2_eventsSet
- - ec2_group_name
- - ec2_hypervisor
- - ec2_id
- - ec2_image_id
- - ec2_instanceState
- - ec2_instance_type
- - ec2_ipOwnerId
- - ec2_ip_address
- - ec2_item
- - ec2_kernel
- - ec2_key_name
- - ec2_launch_time
- - ec2_monitored
- - ec2_monitoring
- - ec2_networkInterfaceId
- - ec2_ownerId
- - ec2_persistent
- - ec2_placement
- - ec2_platform
- - ec2_previous_state
- - ec2_private_dns_name
- - ec2_private_ip_address
- - ec2_publicIp
- - ec2_public_dns_name
- - ec2_ramdisk
- - ec2_reason
- - ec2_region
- - ec2_requester_id
- - ec2_root_device_name
- - ec2_root_device_type
- - ec2_security_group_ids
- - ec2_security_group_names
- - ec2_shutdown_state
- - ec2_sourceDestCheck
- - ec2_spot_instance_request_id
- - ec2_state
- - ec2_state_code
- - ec2_state_reason
- - ec2_status
- - ec2_subnet_id
- - ec2_tenancy
- - ec2_virtualization_type
- - ec2_vpc_id
-
-These variables are pulled out of a boto.ec2.instance object. There is a lack of
-consistency with variable spellings (camelCase and underscores) since this
-just loops through all variables the object exposes. It is preferred to use the
-ones with underscores when multiple exist.
-
-In addition, if an instance has AWS Tags associated with it, each tag is a new
-variable named:
- - ec2_tag_[Key] = [Value]
-
-Security groups are comma-separated in 'ec2_security_group_ids' and
-'ec2_security_group_names'.
-'''
-
-# (c) 2012, Peter Sankauskas
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-import sys
-import os
-import argparse
-import re
-from time import time
-import boto
-from boto import ec2
-from boto import rds
-from boto import elasticache
-from boto import route53
-import six
-
-from ansible.module_utils import ec2 as ec2_utils
-
-HAS_BOTO3 = False
-try:
- import boto3
- HAS_BOTO3 = True
-except ImportError:
- pass
-
-from six.moves import configparser
-from collections import defaultdict
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-
-class Ec2Inventory(object):
-
- def _empty_inventory(self):
- return {"_meta" : {"hostvars" : {}}}
-
- def __init__(self):
- ''' Main execution path '''
-
- # Inventory grouped by instance IDs, tags, security groups, regions,
- # and availability zones
- self.inventory = self._empty_inventory()
-
- # Index of hostname (address) to instance ID
- self.index = {}
-
- # Boto profile to use (if any)
- self.boto_profile = None
-
- # AWS credentials.
- self.credentials = {}
-
- # Read settings and parse CLI arguments
- self.parse_cli_args()
- self.read_settings()
-
- # Make sure that profile_name is not passed at all if not set
- # as pre 2.24 boto will fall over otherwise
- if self.boto_profile:
- if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
- self.fail_with_error("boto version must be >= 2.24 to use profile")
-
- # Cache
- if self.args.refresh_cache:
- self.do_api_calls_update_cache()
- elif not self.is_cache_valid():
- self.do_api_calls_update_cache()
-
- # Data to print
- if self.args.host:
- data_to_print = self.get_host_info()
-
- elif self.args.list:
- # Display list of instances for inventory
- if self.inventory == self._empty_inventory():
- data_to_print = self.get_inventory_from_cache()
- else:
- data_to_print = self.json_format_dict(self.inventory, True)
-
- print(data_to_print)
-
-
- def is_cache_valid(self):
-        ''' Determines if the cache files have expired, or if they are still valid '''
-
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- if os.path.isfile(self.cache_path_index):
- return True
-
- return False
-
-
- def read_settings(self):
- ''' Reads the settings from the ec2.ini file '''
- if six.PY3:
- config = configparser.ConfigParser()
- else:
- config = configparser.SafeConfigParser()
- ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
- ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
- config.read(ec2_ini_path)
-
- # is eucalyptus?
- self.eucalyptus_host = None
- self.eucalyptus = False
- if config.has_option('ec2', 'eucalyptus'):
- self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
- if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
- self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
-
- # Regions
- self.regions = []
- configRegions = config.get('ec2', 'regions')
- configRegions_exclude = config.get('ec2', 'regions_exclude')
- if (configRegions == 'all'):
- if self.eucalyptus_host:
- self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials)
- else:
- for regionInfo in ec2.regions():
- if regionInfo.name not in configRegions_exclude:
- self.regions.append(regionInfo.name)
- else:
- self.regions = configRegions.split(",")
-
- # Destination addresses
- self.destination_variable = config.get('ec2', 'destination_variable')
- self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
-
- if config.has_option('ec2', 'hostname_variable'):
- self.hostname_variable = config.get('ec2', 'hostname_variable')
- else:
- self.hostname_variable = None
-
- if config.has_option('ec2', 'destination_format') and \
- config.has_option('ec2', 'destination_format_tags'):
- self.destination_format = config.get('ec2', 'destination_format')
- self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
- else:
- self.destination_format = None
- self.destination_format_tags = None
-
- # Route53
- self.route53_enabled = config.getboolean('ec2', 'route53')
- self.route53_excluded_zones = []
- if config.has_option('ec2', 'route53_excluded_zones'):
- self.route53_excluded_zones.extend(
- config.get('ec2', 'route53_excluded_zones', '').split(','))
-
- # Include RDS instances?
- self.rds_enabled = True
- if config.has_option('ec2', 'rds'):
- self.rds_enabled = config.getboolean('ec2', 'rds')
-
- # Include RDS cluster instances?
- if config.has_option('ec2', 'include_rds_clusters'):
- self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
- else:
- self.include_rds_clusters = False
-
- # Include ElastiCache instances?
- self.elasticache_enabled = True
- if config.has_option('ec2', 'elasticache'):
- self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
-
- # Return all EC2 instances?
- if config.has_option('ec2', 'all_instances'):
- self.all_instances = config.getboolean('ec2', 'all_instances')
- else:
- self.all_instances = False
-
- # Instance states to be gathered in inventory. Default is 'running'.
- # Setting 'all_instances' to 'yes' overrides this option.
- ec2_valid_instance_states = [
- 'pending',
- 'running',
- 'shutting-down',
- 'terminated',
- 'stopping',
- 'stopped'
- ]
- self.ec2_instance_states = []
- if self.all_instances:
- self.ec2_instance_states = ec2_valid_instance_states
- elif config.has_option('ec2', 'instance_states'):
- for instance_state in config.get('ec2', 'instance_states').split(','):
- instance_state = instance_state.strip()
- if instance_state not in ec2_valid_instance_states:
- continue
- self.ec2_instance_states.append(instance_state)
- else:
- self.ec2_instance_states = ['running']
-
- # Return all RDS instances? (if RDS is enabled)
- if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
- self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
- else:
- self.all_rds_instances = False
-
- # Return all ElastiCache replication groups? (if ElastiCache is enabled)
- if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
- self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
- else:
- self.all_elasticache_replication_groups = False
-
- # Return all ElastiCache clusters? (if ElastiCache is enabled)
- if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
- self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
- else:
- self.all_elasticache_clusters = False
-
- # Return all ElastiCache nodes? (if ElastiCache is enabled)
- if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
- self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
- else:
- self.all_elasticache_nodes = False
-
- # boto configuration profile (prefer CLI argument)
- self.boto_profile = self.args.boto_profile
- if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
- self.boto_profile = config.get('ec2', 'boto_profile')
-
- # AWS credentials (prefer environment variables)
- if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
- os.environ.get('AWS_PROFILE')):
- if config.has_option('credentials', 'aws_access_key_id'):
- aws_access_key_id = config.get('credentials', 'aws_access_key_id')
- else:
- aws_access_key_id = None
- if config.has_option('credentials', 'aws_secret_access_key'):
- aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
- else:
- aws_secret_access_key = None
- if config.has_option('credentials', 'aws_security_token'):
- aws_security_token = config.get('credentials', 'aws_security_token')
- else:
- aws_security_token = None
- if aws_access_key_id:
- self.credentials = {
- 'aws_access_key_id': aws_access_key_id,
- 'aws_secret_access_key': aws_secret_access_key
- }
- if aws_security_token:
- self.credentials['security_token'] = aws_security_token
-
- # Cache related
- cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
- if self.boto_profile:
- cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
- if not os.path.exists(cache_dir):
- os.makedirs(cache_dir)
-
- cache_name = 'ansible-ec2'
- aws_profile = lambda: (self.boto_profile or
- os.environ.get('AWS_PROFILE') or
- os.environ.get('AWS_ACCESS_KEY_ID') or
- self.credentials.get('aws_access_key_id', None))
- if aws_profile():
- cache_name = '%s-%s' % (cache_name, aws_profile())
- self.cache_path_cache = cache_dir + "/%s.cache" % cache_name
- self.cache_path_index = cache_dir + "/%s.index" % cache_name
- self.cache_max_age = config.getint('ec2', 'cache_max_age')
-
- if config.has_option('ec2', 'expand_csv_tags'):
- self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
- else:
- self.expand_csv_tags = False
-
- # Configure nested groups instead of flat namespace.
- if config.has_option('ec2', 'nested_groups'):
- self.nested_groups = config.getboolean('ec2', 'nested_groups')
- else:
- self.nested_groups = False
-
- # Replace dash or not in group names
- if config.has_option('ec2', 'replace_dash_in_groups'):
- self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
- else:
- self.replace_dash_in_groups = True
-
- # Configure which groups should be created.
- group_by_options = [
- 'group_by_instance_id',
- 'group_by_region',
- 'group_by_availability_zone',
- 'group_by_ami_id',
- 'group_by_instance_type',
- 'group_by_key_pair',
- 'group_by_vpc_id',
- 'group_by_security_group',
- 'group_by_tag_keys',
- 'group_by_tag_none',
- 'group_by_route53_names',
- 'group_by_rds_engine',
- 'group_by_rds_parameter_group',
- 'group_by_elasticache_engine',
- 'group_by_elasticache_cluster',
- 'group_by_elasticache_parameter_group',
- 'group_by_elasticache_replication_group',
- ]
- for option in group_by_options:
- if config.has_option('ec2', option):
- setattr(self, option, config.getboolean('ec2', option))
- else:
- setattr(self, option, True)
-
- # Do we need to just include hosts that match a pattern?
- try:
- pattern_include = config.get('ec2', 'pattern_include')
- if pattern_include and len(pattern_include) > 0:
- self.pattern_include = re.compile(pattern_include)
- else:
- self.pattern_include = None
- except configparser.NoOptionError:
- self.pattern_include = None
-
- # Do we need to exclude hosts that match a pattern?
- try:
-            pattern_exclude = config.get('ec2', 'pattern_exclude')
- if pattern_exclude and len(pattern_exclude) > 0:
- self.pattern_exclude = re.compile(pattern_exclude)
- else:
- self.pattern_exclude = None
- except configparser.NoOptionError:
- self.pattern_exclude = None
-
- # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
- self.ec2_instance_filters = defaultdict(list)
- if config.has_option('ec2', 'instance_filters'):
-
- filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]
-
- for instance_filter in filters:
- instance_filter = instance_filter.strip()
- if not instance_filter or '=' not in instance_filter:
- continue
- filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
- if not filter_key:
- continue
- self.ec2_instance_filters[filter_key].append(filter_value)
-
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument('--host', action='store',
- help='Get all the variables about a specific instance')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
- parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
- help='Use boto profile for connections to EC2')
- self.args = parser.parse_args()
-
-
- def do_api_calls_update_cache(self):
- ''' Do API calls to each region, and save data in cache files '''
-
- if self.route53_enabled:
- self.get_route53_records()
-
- for region in self.regions:
- self.get_instances_by_region(region)
- if self.rds_enabled:
- self.get_rds_instances_by_region(region)
- if self.elasticache_enabled:
- self.get_elasticache_clusters_by_region(region)
- self.get_elasticache_replication_groups_by_region(region)
- if self.include_rds_clusters:
- self.include_rds_clusters_by_region(region)
-
- self.write_to_cache(self.inventory, self.cache_path_cache)
- self.write_to_cache(self.index, self.cache_path_index)
-
- def connect(self, region):
- ''' create connection to api server'''
- if self.eucalyptus:
- conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
- conn.APIVersion = '2010-08-31'
- else:
- conn = self.connect_to_aws(ec2, region)
- return conn
-
- def boto_fix_security_token_in_profile(self, connect_args):
- ''' monkey patch for boto issue boto/boto#2100 '''
- profile = 'profile ' + self.boto_profile
- if boto.config.has_option(profile, 'aws_security_token'):
- connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
- return connect_args
-
- def connect_to_aws(self, module, region):
- connect_args = self.credentials
-
- # only pass the profile name if it's set (as it is not supported by older boto versions)
- if self.boto_profile:
- connect_args['profile_name'] = self.boto_profile
- self.boto_fix_security_token_in_profile(connect_args)
-
- conn = module.connect_to_region(region, **connect_args)
- # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
- if conn is None:
- self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
- return conn
-
- def get_instances_by_region(self, region):
-        ''' Makes an AWS EC2 API call to list the instances in a particular
- region '''
-
- try:
- conn = self.connect(region)
- reservations = []
- if self.ec2_instance_filters:
- for filter_key, filter_values in self.ec2_instance_filters.items():
- reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
- else:
- reservations = conn.get_all_instances()
-
- # Pull the tags back in a second step
- # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
- # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
- instance_ids = []
- for reservation in reservations:
- instance_ids.extend([instance.id for instance in reservation.instances])
-
- max_filter_value = 199
- tags = []
- for i in range(0, len(instance_ids), max_filter_value):
- tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
-
- tags_by_instance_id = defaultdict(dict)
- for tag in tags:
- tags_by_instance_id[tag.res_id][tag.name] = tag.value
-
- for reservation in reservations:
- for instance in reservation.instances:
- instance.tags = tags_by_instance_id[instance.id]
- self.add_instance(instance, region)
-
- except boto.exception.BotoServerError as e:
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- else:
- backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
- error = "Error connecting to %s backend.\n%s" % (backend, e.message)
- self.fail_with_error(error, 'getting EC2 instances')
-
- def get_rds_instances_by_region(self, region):
-        ''' Makes an AWS API call to list the RDS instances in a particular
- region '''
-
- try:
- conn = self.connect_to_aws(rds, region)
- if conn:
- marker = None
- while True:
- instances = conn.get_all_dbinstances(marker=marker)
- marker = instances.marker
- for instance in instances:
- self.add_rds_instance(instance, region)
- if not marker:
- break
- except boto.exception.BotoServerError as e:
- error = e.reason
-
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- if not e.reason == "Forbidden":
- error = "Looks like AWS RDS is down:\n%s" % e.message
- self.fail_with_error(error, 'getting RDS instances')
-
- def include_rds_clusters_by_region(self, region):
- if not HAS_BOTO3:
- self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
- "getting RDS clusters")
-
- client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
-
- marker, clusters = '', []
- while marker is not None:
- resp = client.describe_db_clusters(Marker=marker)
- clusters.extend(resp["DBClusters"])
- marker = resp.get('Marker', None)
-
- account_id = boto.connect_iam().get_user().arn.split(':')[4]
- c_dict = {}
- for c in clusters:
- # remove these datetime objects as there is no serialisation to json
- # currently in place and we don't need the data yet
- if 'EarliestRestorableTime' in c:
- del c['EarliestRestorableTime']
- if 'LatestRestorableTime' in c:
- del c['LatestRestorableTime']
-
- if self.ec2_instance_filters == {}:
- matches_filter = True
- else:
- matches_filter = False
-
- try:
- # arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
- tags = client.list_tags_for_resource(
- ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
- c['Tags'] = tags['TagList']
-
- if self.ec2_instance_filters:
- for filter_key, filter_values in self.ec2_instance_filters.items():
- # get AWS tag key e.g. tag:env will be 'env'
- tag_name = filter_key.split(":", 1)[1]
- # Filter values is a list (if you put multiple values for the same tag name)
- matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
-
- if matches_filter:
- # it matches a filter, so stop looking for further matches
- break
-
- except Exception as e:
- if e.message.find('DBInstanceNotFound') >= 0:
-                    # AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster.
- # Ignore errors when trying to find tags for these
- pass
-
- # ignore empty clusters caused by AWS bug
- if len(c['DBClusterMembers']) == 0:
- continue
- elif matches_filter:
- c_dict[c['DBClusterIdentifier']] = c
-
- self.inventory['db_clusters'] = c_dict
-
- def get_elasticache_clusters_by_region(self, region):
-        ''' Makes an AWS API call to list the ElastiCache clusters (with
- nodes' info) in a particular region.'''
-
-        # ElastiCache boto module doesn't provide a get_all_instances method,
- # that's why we need to call describe directly (it would be called by
- # the shorthand method anyway...)
- try:
- conn = self.connect_to_aws(elasticache, region)
- if conn:
- # show_cache_node_info = True
- # because we also want nodes' information
- response = conn.describe_cache_clusters(None, None, None, True)
-
- except boto.exception.BotoServerError as e:
- error = e.reason
-
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- if not e.reason == "Forbidden":
- error = "Looks like AWS ElastiCache is down:\n%s" % e.message
- self.fail_with_error(error, 'getting ElastiCache clusters')
-
- try:
- # Boto also doesn't provide wrapper classes to CacheClusters or
- # CacheNodes. Because of that we can't make use of the get_list
- # method in the AWSQueryConnection. Let's do the work manually
- clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
-
- except KeyError as e:
- error = "ElastiCache query to AWS failed (unexpected format)."
- self.fail_with_error(error, 'getting ElastiCache clusters')
-
- for cluster in clusters:
- self.add_elasticache_cluster(cluster, region)
-
- def get_elasticache_replication_groups_by_region(self, region):
-        ''' Makes an AWS API call to list the ElastiCache replication groups
- in a particular region.'''
-
-        # ElastiCache boto module doesn't provide a get_all_instances method,
- # that's why we need to call describe directly (it would be called by
- # the shorthand method anyway...)
- try:
- conn = self.connect_to_aws(elasticache, region)
- if conn:
- response = conn.describe_replication_groups()
-
- except boto.exception.BotoServerError as e:
- error = e.reason
-
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- if not e.reason == "Forbidden":
- error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
- self.fail_with_error(error, 'getting ElastiCache clusters')
-
- try:
- # Boto also doesn't provide wrapper classes to ReplicationGroups
- # Because of that we can't make use of the get_list method in the
- # AWSQueryConnection. Let's do the work manually
- replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
-
- except KeyError as e:
- error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
- self.fail_with_error(error, 'getting ElastiCache clusters')
-
- for replication_group in replication_groups:
- self.add_elasticache_replication_group(replication_group, region)
-
- def get_auth_error_message(self):
- ''' create an informative error message if there is an issue authenticating'''
- errors = ["Authentication error retrieving ec2 inventory."]
- if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
- errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
- else:
- errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
-
- boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
- boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
- if len(boto_config_found) > 0:
- errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
- else:
- errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
-
- return '\n'.join(errors)
-
- def fail_with_error(self, err_msg, err_operation=None):
-        '''Log an error to stderr for ansible-playbook to consume and exit'''
- if err_operation:
- err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
- err_msg=err_msg, err_operation=err_operation)
- sys.stderr.write(err_msg)
- sys.exit(1)
-
- def get_instance(self, region, instance_id):
- conn = self.connect(region)
-
- reservations = conn.get_all_instances([instance_id])
- for reservation in reservations:
- for instance in reservation.instances:
- return instance
-
- def add_instance(self, instance, region):
- ''' Adds an instance to the inventory and index, as long as it is
- addressable '''
-
- # Only return instances with desired instance states
- if instance.state not in self.ec2_instance_states:
- return
-
- # Select the best destination address
- if self.destination_format and self.destination_format_tags:
- dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
- elif instance.subnet_id:
- dest = getattr(instance, self.vpc_destination_variable, None)
- if dest is None:
- dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
- else:
- dest = getattr(instance, self.destination_variable, None)
- if dest is None:
- dest = getattr(instance, 'tags').get(self.destination_variable, None)
-
- if not dest:
- # Skip instances we cannot address (e.g. private VPC subnet)
- return
-
- # Set the inventory name
- hostname = None
- if self.hostname_variable:
- if self.hostname_variable.startswith('tag_'):
- hostname = instance.tags.get(self.hostname_variable[4:], None)
- else:
- hostname = getattr(instance, self.hostname_variable)
-
- # If we can't get a nice hostname, use the destination address
- if not hostname:
- hostname = dest
- else:
- hostname = self.to_safe(hostname).lower()
-
- # if we only want to include hosts that match a pattern, skip those that don't
- if self.pattern_include and not self.pattern_include.match(hostname):
- return
-
- # if we need to exclude hosts that match a pattern, skip those
- if self.pattern_exclude and self.pattern_exclude.match(hostname):
- return
-
- # Add to index
- self.index[hostname] = [region, instance.id]
-
- # Inventory: Group by instance ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[instance.id] = [hostname]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', instance.id)
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone:
- self.push(self.inventory, instance.placement, hostname)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, instance.placement)
- self.push_group(self.inventory, 'zones', instance.placement)
-
- # Inventory: Group by Amazon Machine Image (AMI) ID
- if self.group_by_ami_id:
- ami_id = self.to_safe(instance.image_id)
- self.push(self.inventory, ami_id, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'images', ami_id)
-
- # Inventory: Group by instance type
- if self.group_by_instance_type:
- type_name = self.to_safe('type_' + instance.instance_type)
- self.push(self.inventory, type_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by key pair
- if self.group_by_key_pair and instance.key_name:
- key_name = self.to_safe('key_' + instance.key_name)
- self.push(self.inventory, key_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'keys', key_name)
-
- # Inventory: Group by VPC
- if self.group_by_vpc_id and instance.vpc_id:
- vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
- self.push(self.inventory, vpc_id_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'vpcs', vpc_id_name)
-
- # Inventory: Group by security group
- if self.group_by_security_group:
- try:
- for group in instance.groups:
- key = self.to_safe("security_group_" + group.name)
- self.push(self.inventory, key, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
- except AttributeError:
- self.fail_with_error('\n'.join(['Package boto seems a bit older.',
- 'Please upgrade boto >= 2.3.0.']))
-
- # Inventory: Group by tag keys
- if self.group_by_tag_keys:
- for k, v in instance.tags.items():
- if self.expand_csv_tags and v and ',' in v:
- values = map(lambda x: x.strip(), v.split(','))
- else:
- values = [v]
-
- for v in values:
- if v:
- key = self.to_safe("tag_" + k + "=" + v)
- else:
- key = self.to_safe("tag_" + k)
- self.push(self.inventory, key, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
- if v:
- self.push_group(self.inventory, self.to_safe("tag_" + k), key)
-
- # Inventory: Group by Route53 domain names if enabled
- if self.route53_enabled and self.group_by_route53_names:
- route53_names = self.get_instance_route53_names(instance)
- for name in route53_names:
- self.push(self.inventory, name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'route53', name)
-
- # Global Tag: instances without tags
- if self.group_by_tag_none and len(instance.tags) == 0:
- self.push(self.inventory, 'tag_none', hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', 'tag_none')
-
- # Global Tag: tag all EC2 instances
- self.push(self.inventory, 'ec2', hostname)
-
- self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
- self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
-
-
- def add_rds_instance(self, instance, region):
- ''' Adds an RDS instance to the inventory and index, as long as it is
- addressable '''
-
- # Only want available instances unless all_rds_instances is True
- if not self.all_rds_instances and instance.status != 'available':
- return
-
- # Select the best destination address
- dest = instance.endpoint[0]
-
- if not dest:
- # Skip instances we cannot address (e.g. private VPC subnet)
- return
-
- # Set the inventory name
- hostname = None
- if self.hostname_variable:
- if self.hostname_variable.startswith('tag_'):
- hostname = instance.tags.get(self.hostname_variable[4:], None)
- else:
- hostname = getattr(instance, self.hostname_variable)
-
- # If we can't get a nice hostname, use the destination address
- if not hostname:
- hostname = dest
-
- hostname = self.to_safe(hostname).lower()
-
- # Add to index
- self.index[hostname] = [region, instance.id]
-
- # Inventory: Group by instance ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[instance.id] = [hostname]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', instance.id)
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone:
- self.push(self.inventory, instance.availability_zone, hostname)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, instance.availability_zone)
- self.push_group(self.inventory, 'zones', instance.availability_zone)
-
- # Inventory: Group by instance type
- if self.group_by_instance_type:
- type_name = self.to_safe('type_' + instance.instance_class)
- self.push(self.inventory, type_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by VPC
- if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
- vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
- self.push(self.inventory, vpc_id_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'vpcs', vpc_id_name)
-
- # Inventory: Group by security group
- if self.group_by_security_group:
- try:
- if instance.security_group:
- key = self.to_safe("security_group_" + instance.security_group.name)
- self.push(self.inventory, key, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
-
- except AttributeError:
- self.fail_with_error('\n'.join(['Package boto seems a bit older.',
- 'Please upgrade boto >= 2.3.0.']))
-
-
- # Inventory: Group by engine
- if self.group_by_rds_engine:
- self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
-
- # Inventory: Group by parameter group
- if self.group_by_rds_parameter_group:
- self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
-
- # Global Tag: all RDS instances
- self.push(self.inventory, 'rds', hostname)
-
- self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
- self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
-
- def add_elasticache_cluster(self, cluster, region):
- ''' Adds an ElastiCache cluster to the inventory and index, as long as
-            its nodes are addressable '''
-
- # Only want available clusters unless all_elasticache_clusters is True
- if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
- return
-
- # Select the best destination address
- if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
- # Memcached cluster
- dest = cluster['ConfigurationEndpoint']['Address']
- is_redis = False
- else:
-            # Redis single-node cluster
- # Because all Redis clusters are single nodes, we'll merge the
- # info from the cluster with info about the node
- dest = cluster['CacheNodes'][0]['Endpoint']['Address']
- is_redis = True
-
- if not dest:
- # Skip clusters we cannot address (e.g. private VPC subnet)
- return
-
- # Add to index
- self.index[dest] = [region, cluster['CacheClusterId']]
-
- # Inventory: Group by instance ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[cluster['CacheClusterId']] = [dest]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
-
- # Inventory: Group by region
- if self.group_by_region and not is_redis:
- self.push(self.inventory, region, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone and not is_redis:
- self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
- self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
-
- # Inventory: Group by node type
- if self.group_by_instance_type and not is_redis:
- type_name = self.to_safe('type_' + cluster['CacheNodeType'])
- self.push(self.inventory, type_name, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by VPC (information not available in the current
- # AWS API version for ElastiCache)
-
- # Inventory: Group by security group
- if self.group_by_security_group and not is_redis:
-
- # Check for the existence of the 'SecurityGroups' key and also if
- # this key has some value. When the cluster is not placed in a SG
- # the query can return None here and cause an error.
- if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
- for security_group in cluster['SecurityGroups']:
- key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
- self.push(self.inventory, key, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
-
- # Inventory: Group by engine
- if self.group_by_elasticache_engine and not is_redis:
- self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
-
- # Inventory: Group by parameter group
- if self.group_by_elasticache_parameter_group:
- self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
-
- # Inventory: Group by replication group
- if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
- self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
-
- # Global Tag: all ElastiCache clusters
- self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
-
- host_info = self.get_host_info_dict_from_describe_dict(cluster)
-
- self.inventory["_meta"]["hostvars"][dest] = host_info
-
- # Add the nodes
- for node in cluster['CacheNodes']:
- self.add_elasticache_node(node, cluster, region)
-
- def add_elasticache_node(self, node, cluster, region):
- ''' Adds an ElastiCache node to the inventory and index, as long as
- it is addressable '''
-
- # Only want available nodes unless all_elasticache_nodes is True
- if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
- return
-
- # Select the best destination address
- dest = node['Endpoint']['Address']
-
- if not dest:
- # Skip nodes we cannot address (e.g. private VPC subnet)
- return
-
- node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
-
- # Add to index
- self.index[dest] = [region, node_id]
-
- # Inventory: Group by node ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[node_id] = [dest]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', node_id)
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone:
- self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
- self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
-
- # Inventory: Group by node type
- if self.group_by_instance_type:
- type_name = self.to_safe('type_' + cluster['CacheNodeType'])
- self.push(self.inventory, type_name, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by VPC (information not available in the current
- # AWS API version for ElastiCache)
-
- # Inventory: Group by security group
- if self.group_by_security_group:
-
- # Check for the existence of the 'SecurityGroups' key and also if
- # this key has some value. When the cluster is not placed in a SG
- # the query can return None here and cause an error.
- if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
- for security_group in cluster['SecurityGroups']:
- key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
- self.push(self.inventory, key, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
-
- # Inventory: Group by engine
- if self.group_by_elasticache_engine:
- self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
-
- # Inventory: Group by parameter group (done at cluster level)
-
- # Inventory: Group by replication group (done at cluster level)
-
- # Inventory: Group by ElastiCache Cluster
- if self.group_by_elasticache_cluster:
- self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
-
- # Global Tag: all ElastiCache nodes
- self.push(self.inventory, 'elasticache_nodes', dest)
-
- host_info = self.get_host_info_dict_from_describe_dict(node)
-
- if dest in self.inventory["_meta"]["hostvars"]:
- self.inventory["_meta"]["hostvars"][dest].update(host_info)
- else:
- self.inventory["_meta"]["hostvars"][dest] = host_info
-
- def add_elasticache_replication_group(self, replication_group, region):
- ''' Adds an ElastiCache replication group to the inventory and index '''
-
- # Only want available clusters unless all_elasticache_replication_groups is True
- if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
- return
-
- # Select the best destination address (PrimaryEndpoint)
- dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
-
- if not dest:
- # Skip clusters we cannot address (e.g. private VPC subnet)
- return
-
- # Add to index
- self.index[dest] = [region, replication_group['ReplicationGroupId']]
-
- # Inventory: Group by ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[replication_group['ReplicationGroupId']] = [dest]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone (doesn't apply to replication groups)
-
- # Inventory: Group by node type (doesn't apply to replication groups)
-
- # Inventory: Group by VPC (information not available in the current
-        # AWS API version for replication groups)
-
- # Inventory: Group by security group (doesn't apply to replication groups)
- # Check this value in cluster level
-
- # Inventory: Group by engine (replication groups are always Redis)
- if self.group_by_elasticache_engine:
- self.push(self.inventory, 'elasticache_redis', dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_engines', 'redis')
-
- # Global Tag: all ElastiCache clusters
- self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
-
- host_info = self.get_host_info_dict_from_describe_dict(replication_group)
-
- self.inventory["_meta"]["hostvars"][dest] = host_info
-
- def get_route53_records(self):
- ''' Get and store the map of resource records to domain names that
- point to them. '''
-
- r53_conn = route53.Route53Connection()
- all_zones = r53_conn.get_zones()
-
- route53_zones = [ zone for zone in all_zones if zone.name[:-1]
- not in self.route53_excluded_zones ]
-
- self.route53_records = {}
-
- for zone in route53_zones:
- rrsets = r53_conn.get_all_rrsets(zone.id)
-
- for record_set in rrsets:
- record_name = record_set.name
-
- if record_name.endswith('.'):
- record_name = record_name[:-1]
-
- for resource in record_set.resource_records:
- self.route53_records.setdefault(resource, set())
- self.route53_records[resource].add(record_name)
-
-
- def get_instance_route53_names(self, instance):
- ''' Check if an instance is referenced in the records we have from
- Route53. If it is, return the list of domain names pointing to said
- instance. If nothing points to it, return an empty list. '''
-
- instance_attributes = [ 'public_dns_name', 'private_dns_name',
- 'ip_address', 'private_ip_address' ]
-
- name_list = set()
-
- for attrib in instance_attributes:
- try:
- value = getattr(instance, attrib)
- except AttributeError:
- continue
-
- if value in self.route53_records:
- name_list.update(self.route53_records[value])
-
- return list(name_list)
-
- def get_host_info_dict_from_instance(self, instance):
- instance_vars = {}
- for key in vars(instance):
- value = getattr(instance, key)
- key = self.to_safe('ec2_' + key)
-
- # Handle complex types
- # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
- if key == 'ec2__state':
- instance_vars['ec2_state'] = instance.state or ''
- instance_vars['ec2_state_code'] = instance.state_code
- elif key == 'ec2__previous_state':
- instance_vars['ec2_previous_state'] = instance.previous_state or ''
- instance_vars['ec2_previous_state_code'] = instance.previous_state_code
- elif type(value) in [int, bool]:
- instance_vars[key] = value
- elif isinstance(value, six.string_types):
- instance_vars[key] = value.strip()
- elif type(value) == type(None):
- instance_vars[key] = ''
- elif key == 'ec2_region':
- instance_vars[key] = value.name
- elif key == 'ec2__placement':
- instance_vars['ec2_placement'] = value.zone
- elif key == 'ec2_tags':
- for k, v in value.items():
- if self.expand_csv_tags and ',' in v:
- v = list(map(lambda x: x.strip(), v.split(',')))
- key = self.to_safe('ec2_tag_' + k)
- instance_vars[key] = v
- elif key == 'ec2_groups':
- group_ids = []
- group_names = []
- for group in value:
- group_ids.append(group.id)
- group_names.append(group.name)
- instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
- instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
- elif key == 'ec2_block_device_mapping':
- instance_vars["ec2_block_devices"] = {}
- for k, v in value.items():
- instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
- else:
- pass
- # TODO Product codes if someone finds them useful
- #print key
- #print type(value)
- #print value
-
- return instance_vars
-
- def get_host_info_dict_from_describe_dict(self, describe_dict):
- ''' Parses the dictionary returned by the API call into a flat list
- of parameters. This method should be used only when 'describe' is
- used directly because Boto doesn't provide specific classes. '''
-
- # I really don't agree with prefixing everything with 'ec2'
- # because EC2, RDS and ElastiCache are different services.
- # I'm just following the pattern used until now to not break any
- # compatibility.
-
- host_info = {}
- for key in describe_dict:
- value = describe_dict[key]
- key = self.to_safe('ec2_' + self.uncammelize(key))
-
- # Handle complex types
-
- # Target: Memcached Cache Clusters
- if key == 'ec2_configuration_endpoint' and value:
- host_info['ec2_configuration_endpoint_address'] = value['Address']
- host_info['ec2_configuration_endpoint_port'] = value['Port']
-
- # Target: Cache Nodes and Redis Cache Clusters (single node)
- if key == 'ec2_endpoint' and value:
- host_info['ec2_endpoint_address'] = value['Address']
- host_info['ec2_endpoint_port'] = value['Port']
-
- # Target: Redis Replication Groups
- if key == 'ec2_node_groups' and value:
- host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
- host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
- replica_count = 0
- for node in value[0]['NodeGroupMembers']:
- if node['CurrentRole'] == 'primary':
- host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
- host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
- host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
- elif node['CurrentRole'] == 'replica':
- host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
- host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
- host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
- replica_count += 1
-
- # Target: Redis Replication Groups
- if key == 'ec2_member_clusters' and value:
- host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
-
- # Target: All Cache Clusters
- elif key == 'ec2_cache_parameter_group':
- host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
- host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
- host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
-
- # Target: Almost everything
- elif key == 'ec2_security_groups':
-
- # Skip if SecurityGroups is None
- # (it is possible to have the key defined but no value in it).
- if value is not None:
- sg_ids = []
- for sg in value:
- sg_ids.append(sg['SecurityGroupId'])
- host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
-
- # Target: Everything
- # Preserve booleans and integers
- elif type(value) in [int, bool]:
- host_info[key] = value
-
- # Target: Everything
- # Sanitize string values
- elif isinstance(value, six.string_types):
- host_info[key] = value.strip()
-
- # Target: Everything
- # Replace None by an empty string
- elif type(value) == type(None):
- host_info[key] = ''
-
- else:
- # Remove non-processed complex types
- pass
-
- return host_info
-
- def get_host_info(self):
- ''' Get variables about a specific host '''
-
- if len(self.index) == 0:
- # Need to load index from cache
- self.load_index_from_cache()
-
- if not self.args.host in self.index:
- # try updating the cache
- self.do_api_calls_update_cache()
- if not self.args.host in self.index:
- # host might not exist anymore
- return self.json_format_dict({}, True)
-
- (region, instance_id) = self.index[self.args.host]
-
- instance = self.get_instance(region, instance_id)
- return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
-
- def push(self, my_dict, key, element):
- ''' Push an element onto an array that may not have been defined in
- the dict '''
- group_info = my_dict.setdefault(key, [])
- if isinstance(group_info, dict):
- host_list = group_info.setdefault('hosts', [])
- host_list.append(element)
- else:
- group_info.append(element)
-
- def push_group(self, my_dict, key, element):
- ''' Push a group as a child of another group. '''
- parent_group = my_dict.setdefault(key, {})
- if not isinstance(parent_group, dict):
- parent_group = my_dict[key] = {'hosts': parent_group}
- child_groups = parent_group.setdefault('children', [])
- if element not in child_groups:
- child_groups.append(element)
-
- def get_inventory_from_cache(self):
- ''' Reads the inventory from the cache file and returns it as a JSON
- object '''
-
- cache = open(self.cache_path_cache, 'r')
- json_inventory = cache.read()
- return json_inventory
-
-
- def load_index_from_cache(self):
-        ''' Reads the index from the cache file and sets self.index '''
-
- cache = open(self.cache_path_index, 'r')
- json_index = cache.read()
- self.index = json.loads(json_index)
-
-
- def write_to_cache(self, data, filename):
- ''' Writes data in JSON format to a file '''
-
- json_data = self.json_format_dict(data, True)
- cache = open(filename, 'w')
- cache.write(json_data)
- cache.close()
-
- def uncammelize(self, key):
- temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
- return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
-
- def to_safe(self, word):
- ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
- regex = "[^A-Za-z0-9\_"
- if not self.replace_dash_in_groups:
- regex += "\-"
- return re.sub(regex + "]", "_", word)
-
- def json_format_dict(self, data, pretty=False):
- ''' Converts a dict to a JSON object and dumps it as a formatted
- string '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-
-# Run the script
-Ec2Inventory()
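
For reference, the deleted ec2.py (and the gce.py, libvirt and openstack scripts removed below) all follow Ansible's dynamic-inventory convention: `--list` prints a JSON dict of groups plus `_meta.hostvars`, and `--host <name>` prints that one host's variables. The following is a minimal sketch of that contract only; the host names and variables are made-up placeholders, not anything from this repository.

```python
#!/usr/bin/env python
# Minimal sketch of the dynamic-inventory contract the deleted ec2.py/gce.py
# scripts implement. Hosts and hostvars below are invented placeholders.
import argparse
import json

INVENTORY = {
    "tag_host-type_master": {"hosts": ["master1.example.com"]},
    "tag_host-type_node": {"hosts": ["node1.example.com"]},
    "_meta": {"hostvars": {
        "master1.example.com": {"ansible_ssh_host": "10.0.0.10"},
        "node1.example.com": {"ansible_ssh_host": "10.0.0.11"},
    }},
}

def main():
    parser = argparse.ArgumentParser(description="toy dynamic inventory")
    parser.add_argument("--list", action="store_true")
    parser.add_argument("--host")
    args = parser.parse_args()

    if args.host:
        # Per-host variables; an empty dict if the host is unknown.
        print(json.dumps(INVENTORY["_meta"]["hostvars"].get(args.host, {}), indent=2))
    else:
        # Default to --list behaviour, as the deleted scripts do.
        print(json.dumps(INVENTORY, indent=2))

if __name__ == "__main__":
    main()
```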
diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts
deleted file mode 100644
index 3996e577e..000000000
--- a/inventory/aws/hosts/hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 396383725..ad69bd587 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -114,7 +114,7 @@ openshift_release=v3.6
# Instead of using docker, replace it with cri-o
# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
# just as container-engine does.
-#openshift_docker_use_crio=False
+#openshift_use_crio=False
# Force the registry to use for the docker/crio system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
@@ -380,45 +380,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# and is in the form of a list. If no data is passed then a default router will be
# created. There are multiple combinations of router sharding. The one described
# below supports routers on separate nodes.
-#openshift_hosted_routers:
-#- name: router1
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router1
-# images: "openshift3/ose-${component}:${version}"
-# edits: []
-# certificate:
-# certfile: /path/to/certificate/abc.crt
-# keyfile: /path/to/certificate/abc.key
-# cafile: /path/to/certificate/ca.crt
-#- name: router2
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router2
-# images: "openshift3/ose-${component}:${version}"
-# certificate:
-# certfile: /path/to/certificate/xyz.crt
-# keyfile: /path/to/certificate/xyz.key
-# cafile: /path/to/certificate/ca.crt
-# edits:
-# # ROUTE_LABELS sets the router to listen for routes
-# # tagged with the provided values
-# - key: spec.template.spec.containers[0].env
-# value:
-# name: ROUTE_LABELS
-# value: "route=external"
-# action: append
#
+#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
+
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index fa4cc4f26..b52806bc7 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -114,7 +114,7 @@ openshift_release=v3.6
# Install and run cri-o alongside docker
# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
# just as container-engine does.
-#openshift_docker_use_crio=False
+#openshift_use_crio=False
# Force the registry to use for the container-engine/crio system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
@@ -170,6 +170,14 @@ openshift_release=v3.6
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
+# If oreg_url points to a registry requiring authentication, provide the following:
+#oreg_auth_user=some_user
+#oreg_auth_password='my-pass'
+# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
+# oreg_auth_password should be generated by running docker login.
+# To update registry auth credentials, uncomment the following:
+#oreg_auth_credentials_replace=True
+
# OpenShift repository configuration
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
#openshift_repos_enable_testing=false
@@ -379,44 +387,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# and is in the form of a list. If no data is passed then a default router will be
# created. There are multiple combinations of router sharding. The one described
# below supports routers on separate nodes.
-#openshift_hosted_routers:
-#- name: router1
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router1
-# images: "openshift3/ose-${component}:${version}"
-# edits: []
-# certificate:
-# certfile: /path/to/certificate/abc.crt
-# keyfile: /path/to/certificate/abc.key
-# cafile: /path/to/certificate/ca.crt
-#- name: router2
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router2
-# images: "openshift3/ose-${component}:${version}"
-# certificate:
-# certfile: /path/to/certificate/xyz.crt
-# keyfile: /path/to/certificate/xyz.key
-# cafile: /path/to/certificate/ca.crt
-# edits:
-# # ROUTE_LABELS sets the router to listen for routes
-# # tagged with the provided values
-# - key: spec.template.spec.containers[0].env
-# value:
-# name: ROUTE_LABELS
-# value: "route=external"
-# action: append
+#
+#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
deleted file mode 100755
index 2be46a58c..000000000
--- a/inventory/gce/hosts/gce.py
+++ /dev/null
@@ -1,477 +0,0 @@
-#!/usr/bin/env python2
-# pylint: skip-file
-# Copyright 2013 Google Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-GCE external inventory script
-=================================
-
-Generates inventory that Ansible can understand by making API requests
-Google Compute Engine via the libcloud library. Full install/configuration
-instructions for the gce* modules can be found in the comments of
-ansible/test/gce_tests.py.
-
-When run against a specific host, this script returns the following variables
-based on the data obtained from the libcloud Node object:
- - gce_uuid
- - gce_id
- - gce_image
- - gce_machine_type
- - gce_private_ip
- - gce_public_ip
- - gce_name
- - gce_description
- - gce_status
- - gce_zone
- - gce_tags
- - gce_metadata
- - gce_network
-
-When run in --list mode, instances are grouped by the following categories:
- - zone:
- zone group name examples are us-central1-b, europe-west1-a, etc.
- - instance tags:
- An entry is created for each tag. For example, if you have two instances
- with a common tag called 'foo', they will both be grouped together under
- the 'tag_foo' name.
- - network name:
- the name of the network is appended to 'network_' (e.g. the 'default'
- network will result in a group named 'network_default')
- - machine type
- types follow a pattern like n1-standard-4, g1-small, etc.
- - running status:
- group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- - image:
- when using an ephemeral/scratch disk, this will be set to the image name
- used when creating the instance (e.g. debian-7-wheezy-v20130816). when
- your instance was created with a root persistent disk it will be set to
- 'persistent_disk' since there is no current way to determine the image.
-
-Examples:
- Execute uname on all instances in the us-central1-a zone
- $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
-
- Use the GCE inventory script to print out instance specific information
- $ contrib/inventory/gce.py --host my_instance
-
-Author: Eric Johnson <erjohnso@google.com>
-Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
-Version: 0.0.3
-'''
-
-__requires__ = ['pycrypto>=2.6']
-try:
- import pkg_resources
-except ImportError:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. We don't
- # fail here as there is code that better expresses the errors where the
- # library is used.
- pass
-
-USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
-USER_AGENT_VERSION="v2"
-
-import sys
-import os
-import argparse
-
-from time import time
-
-import ConfigParser
-
-import logging
-logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-try:
- from libcloud.compute.types import Provider
- from libcloud.compute.providers import get_driver
- _ = Provider.GCE
-except:
- sys.exit("GCE inventory script requires libcloud >= 0.13")
-
-
-class CloudInventoryCache(object):
- def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
- cache_max_age=300):
- cache_dir = os.path.expanduser(cache_path)
- if not os.path.exists(cache_dir):
- os.makedirs(cache_dir)
- self.cache_path_cache = os.path.join(cache_dir, cache_name)
-
- self.cache_max_age = cache_max_age
-
- def is_valid(self, max_age=None):
- ''' Determines if the cache files have expired, or if it is still valid '''
-
- if max_age is None:
- max_age = self.cache_max_age
-
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + max_age) > current_time:
- return True
-
- return False
-
- def get_all_data_from_cache(self, filename=''):
- ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
-
- data = ''
- if not filename:
- filename = self.cache_path_cache
- with open(filename, 'r') as cache:
- data = cache.read()
- return json.loads(data)
-
- def write_to_cache(self, data, filename=''):
- ''' Writes data to file as JSON. Returns True. '''
- if not filename:
- filename = self.cache_path_cache
- json_data = json.dumps(data)
- with open(filename, 'w') as cache:
- cache.write(json_data)
- return True
-
-
-class GceInventory(object):
- def __init__(self):
- # Cache object
- self.cache = None
- # dictionary containing inventory read from disk
- self.inventory = {}
-
- # Read settings and parse CLI arguments
- self.parse_cli_args()
- self.config = self.get_config()
- self.driver = self.get_gce_driver()
- self.ip_type = self.get_inventory_options()
- if self.ip_type:
- self.ip_type = self.ip_type.lower()
-
- # Cache management
- start_inventory_time = time()
- cache_used = False
- if self.args.refresh_cache or not self.cache.is_valid():
- self.do_api_calls_update_cache()
- else:
- self.load_inventory_from_cache()
- cache_used = True
- self.inventory['_meta']['stats'] = {'use_cache': True}
- self.inventory['_meta']['stats'] = {
- 'inventory_load_time': time() - start_inventory_time,
- 'cache_used': cache_used
- }
-
- # Just display data for specific host
- if self.args.host:
- print(self.json_format_dict(
- self.inventory['_meta']['hostvars'][self.args.host],
- pretty=self.args.pretty))
- else:
- # Otherwise, assume user wants all instances grouped
- zones = self.parse_env_zones()
- print(self.json_format_dict(self.inventory,
- pretty=self.args.pretty))
- sys.exit(0)
-
- def get_config(self):
- """
- Reads the settings from the gce.ini file.
-
- Populates a SafeConfigParser object with defaults and
- attempts to read an .ini-style configuration from the filename
- specified in GCE_INI_PATH. If the environment variable is
- not present, the filename defaults to gce.ini in the current
- working directory.
- """
- gce_ini_default_path = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), "gce.ini")
- gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
-
- # Create a ConfigParser.
- # This provides empty defaults to each key, so that environment
- # variable configuration (as opposed to INI configuration) is able
- # to work.
- config = ConfigParser.SafeConfigParser(defaults={
- 'gce_service_account_email_address': '',
- 'gce_service_account_pem_file_path': '',
- 'gce_project_id': '',
- 'libcloud_secrets': '',
- 'inventory_ip_type': '',
- 'cache_path': '~/.ansible/tmp',
- 'cache_max_age': '300'
- })
- if 'gce' not in config.sections():
- config.add_section('gce')
- if 'inventory' not in config.sections():
- config.add_section('inventory')
- if 'cache' not in config.sections():
- config.add_section('cache')
-
- config.read(gce_ini_path)
-
- #########
- # Section added for processing ini settings
- #########
-
- # Set the instance_states filter based on config file options
- self.instance_states = []
- if config.has_option('gce', 'instance_states'):
- states = config.get('gce', 'instance_states')
- # Ignore if instance_states is an empty string.
- if states:
- self.instance_states = states.split(',')
-
- # Caching
- cache_path = config.get('cache', 'cache_path')
- cache_max_age = config.getint('cache', 'cache_max_age')
- # TOOD(supertom): support project-specific caches
- cache_name = 'ansible-gce.cache'
- self.cache = CloudInventoryCache(cache_path=cache_path,
- cache_max_age=cache_max_age,
- cache_name=cache_name)
- return config
-
- def get_inventory_options(self):
- """Determine inventory options. Environment variables always
- take precedence over configuration files."""
- ip_type = self.config.get('inventory', 'inventory_ip_type')
- # If the appropriate environment variables are set, they override
- # other configuration
- ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
- return ip_type
-
- def get_gce_driver(self):
- """Determine the GCE authorization settings and return a
- libcloud driver.
- """
- # Attempt to get GCE params from a configuration file, if one
- # exists.
- secrets_path = self.config.get('gce', 'libcloud_secrets')
- secrets_found = False
- try:
- import secrets
- args = list(getattr(secrets, 'GCE_PARAMS', []))
- kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
- secrets_found = True
- except:
- pass
-
- if not secrets_found and secrets_path:
- if not secrets_path.endswith('secrets.py'):
- err = "Must specify libcloud secrets file as "
- err += "/absolute/path/to/secrets.py"
- sys.exit(err)
- sys.path.append(os.path.dirname(secrets_path))
- try:
- import secrets
- args = list(getattr(secrets, 'GCE_PARAMS', []))
- kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
- secrets_found = True
- except:
- pass
- if not secrets_found:
- args = [
- self.config.get('gce','gce_service_account_email_address'),
- self.config.get('gce','gce_service_account_pem_file_path')
- ]
- kwargs = {'project': self.config.get('gce', 'gce_project_id')}
-
- # If the appropriate environment variables are set, they override
- # other configuration; process those into our args and kwargs.
- args[0] = os.environ.get('GCE_EMAIL', args[0])
- args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
- kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
-
- # Retrieve and return the GCE driver.
- gce = get_driver(Provider.GCE)(*args, **kwargs)
- gce.connection.user_agent_append(
- '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
- )
- return gce
-
- def parse_env_zones(self):
- '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
- If provided, this will be used to filter the results of the grouped_instances call'''
- import csv
- reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
- zones = [r for r in reader]
- return [z for z in zones[0]]
-
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(
- description='Produce an Ansible Inventory file based on GCE')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument('--host', action='store',
- help='Get all information about an instance')
- parser.add_argument('--pretty', action='store_true', default=False,
- help='Pretty format (default: False)')
- parser.add_argument(
- '--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests (default: False - use cache files)')
- self.args = parser.parse_args()
-
-
- def node_to_dict(self, inst):
- md = {}
-
- if inst is None:
- return {}
-
- if 'items' in inst.extra['metadata']:
- for entry in inst.extra['metadata']['items']:
- md[entry['key']] = entry['value']
-
- net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
- # default to exernal IP unless user has specified they prefer internal
- if self.ip_type == 'internal':
- ssh_host = inst.private_ips[0]
- else:
- ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
-
- return {
- 'gce_uuid': inst.uuid,
- 'gce_id': inst.id,
- 'gce_image': inst.image,
- 'gce_machine_type': inst.size,
- 'gce_private_ip': inst.private_ips[0],
- 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
- 'gce_name': inst.name,
- 'gce_description': inst.extra['description'],
- 'gce_status': inst.extra['status'],
- 'gce_zone': inst.extra['zone'].name,
- 'gce_tags': inst.extra['tags'],
- 'gce_metadata': md,
- 'gce_network': net,
- # Hosts don't have a public name, so we add an IP
- 'ansible_ssh_host': ssh_host
- }
-
- def load_inventory_from_cache(self):
- ''' Loads inventory from JSON on disk. '''
-
- try:
- self.inventory = self.cache.get_all_data_from_cache()
- hosts = self.inventory['_meta']['hostvars']
- except Exception as e:
- print(
- "Invalid inventory file %s. Please rebuild with -refresh-cache option."
- % (self.cache.cache_path_cache))
- raise
-
- def do_api_calls_update_cache(self):
- ''' Do API calls and save data in cache. '''
- zones = self.parse_env_zones()
- data = self.group_instances(zones)
- self.cache.write_to_cache(data)
- self.inventory = data
-
- def list_nodes(self):
- all_nodes = []
- params, more_results = {'maxResults': 500}, True
- while more_results:
- self.driver.connection.gce_params=params
- all_nodes.extend(self.driver.list_nodes())
- more_results = 'pageToken' in params
- return all_nodes
-
- def group_instances(self, zones=None):
- '''Group all instances'''
- groups = {}
- meta = {}
- meta["hostvars"] = {}
-
- for node in self.list_nodes():
-
- # This check filters on the desired instance states defined in the
- # config file with the instance_states config option.
- #
- # If the instance_states list is _empty_ then _ALL_ states are returned.
- #
- # If the instance_states list is _populated_ then check the current
- # state against the instance_states list
- if self.instance_states and not node.extra['status'] in self.instance_states:
- continue
-
- name = node.name
-
- meta["hostvars"][name] = self.node_to_dict(node)
-
- zone = node.extra['zone'].name
-
- # To avoid making multiple requests per zone
- # we list all nodes and then filter the results
- if zones and zone not in zones:
- continue
-
- if zone in groups: groups[zone].append(name)
- else: groups[zone] = [name]
-
- tags = node.extra['tags']
- for t in tags:
- if t.startswith('group-'):
- tag = t[6:]
- else:
- tag = 'tag_%s' % t
- if tag in groups: groups[tag].append(name)
- else: groups[tag] = [name]
-
- net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
- net = 'network_%s' % net
- if net in groups: groups[net].append(name)
- else: groups[net] = [name]
-
- machine_type = node.size
- if machine_type in groups: groups[machine_type].append(name)
- else: groups[machine_type] = [name]
-
- image = node.image and node.image or 'persistent_disk'
- if image in groups: groups[image].append(name)
- else: groups[image] = [name]
-
- status = node.extra['status']
- stat = 'status_%s' % status.lower()
- if stat in groups: groups[stat].append(name)
- else: groups[stat] = [name]
-
- groups["_meta"] = meta
-
- return groups
-
- def json_format_dict(self, data, pretty=False):
- ''' Converts a dict to a JSON object and dumps it as a formatted
- string '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-# Run the script
-if __name__ == '__main__':
- GceInventory()
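
The docstring of the deleted gce.py above describes how instances are grouped (by zone, `tag_*`, `network_*`, machine type and `status_*`). The sketch below illustrates only that grouping pattern, using hand-written node records instead of libcloud Node objects.

```python
# Sketch of the grouping pattern used by the deleted gce.py inventory:
# each instance is appended to groups derived from its zone, tags, network
# and status. The node data below is an invented placeholder.
nodes = [
    {"name": "node-1", "zone": "us-central1-a", "tags": ["infra"],
     "network": "default", "status": "RUNNING"},
    {"name": "node-2", "zone": "europe-west1-b", "tags": ["group-masters"],
     "network": "default", "status": "TERMINATED"},
]

groups = {}
for node in nodes:
    names = [node["zone"],
             "network_%s" % node["network"],
             "status_%s" % node["status"].lower()]
    # Tags prefixed with 'group-' become plain group names; others get 'tag_'.
    for t in node["tags"]:
        names.append(t[6:] if t.startswith("group-") else "tag_%s" % t)
    for g in names:
        groups.setdefault(g, []).append(node["name"])

print(groups)
```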
diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts
deleted file mode 100644
index 3996e577e..000000000
--- a/inventory/gce/hosts/hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts
deleted file mode 100644
index 3996e577e..000000000
--- a/inventory/libvirt/hosts/hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/libvirt/hosts/libvirt.ini b/inventory/libvirt/hosts/libvirt.ini
deleted file mode 100644
index 62ff204dd..000000000
--- a/inventory/libvirt/hosts/libvirt.ini
+++ /dev/null
@@ -1,20 +0,0 @@
-# Ansible libvirt external inventory script settings
-#
-
-[libvirt]
-
-uri = qemu:///system
-
-# API calls to libvirt can be slow. For this reason, we cache the results of an API
-# call. Set this to the path you want cache files to be written to. Two files
-# will be written to this directory:
-# - ansible-libvirt.cache
-# - ansible-libvirt.index
-cache_path = /tmp
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-cache_max_age = 900
-
-
-
diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py
deleted file mode 100755
index d63e07b64..000000000
--- a/inventory/libvirt/hosts/libvirt_generic.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/env python2
-# pylint: skip-file
-
-'''
-libvirt external inventory script
-=================================
-
-Ansible has a feature where instead of reading from /etc/ansible/hosts
-as a text file, it can query external programs to obtain the list
-of hosts, groups the hosts are in, and even variables to assign to each host.
-
-To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
-This, more or less, allows you to keep one central database containing
-info about all of your managed instances.
-
-'''
-
-# (c) 2015, Jason DeTiberus <jdetiber@redhat.com>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-import argparse
-import ConfigParser
-import os
-import sys
-import libvirt
-import xml.etree.ElementTree as ET
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-
-class LibvirtInventory(object):
- ''' libvirt dynamic inventory '''
-
- def __init__(self):
- ''' Main execution path '''
-
- self.inventory = dict() # A list of groups and the hosts in that group
- self.cache = dict() # Details about hosts in the inventory
-
- # Read settings and parse CLI arguments
- self.read_settings()
- self.parse_cli_args()
-
- if self.args.host:
- print(_json_format_dict(self.get_host_info(), self.args.pretty))
- elif self.args.list:
- print(_json_format_dict(self.get_inventory(), self.args.pretty))
- else: # default action with no options
- print(_json_format_dict(self.get_inventory(), self.args.pretty))
-
- def read_settings(self):
- ''' Reads the settings from the libvirt.ini file '''
-
- config = ConfigParser.SafeConfigParser()
- config.read(
- os.path.dirname(os.path.realpath(__file__)) + '/libvirt.ini'
- )
- self.libvirt_uri = config.get('libvirt', 'uri')
-
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(
- description='Produce an Ansible Inventory file based on libvirt'
- )
- parser.add_argument(
- '--list',
- action='store_true',
- default=True,
- help='List instances (default: True)'
- )
- parser.add_argument(
- '--host',
- action='store',
- help='Get all the variables about a specific instance'
- )
- parser.add_argument(
- '--pretty',
- action='store_true',
- default=False,
- help='Pretty format (default: False)'
- )
- self.args = parser.parse_args()
-
- def get_host_info(self):
- ''' Get variables about a specific host '''
-
- inventory = self.get_inventory()
- if self.args.host in inventory['_meta']['hostvars']:
- return inventory['_meta']['hostvars'][self.args.host]
-
- def get_inventory(self):
- ''' Construct the inventory '''
-
- inventory = dict(_meta=dict(hostvars=dict()))
-
- conn = libvirt.openReadOnly(self.libvirt_uri)
- if conn is None:
- print("Failed to open connection to %s" % self.libvirt_uri)
- sys.exit(1)
-
- domains = conn.listAllDomains()
- if domains is None:
- print("Failed to list domains for connection %s" % self.libvirt_uri)
- sys.exit(1)
-
- for domain in domains:
- hostvars = dict(libvirt_name=domain.name(),
- libvirt_id=domain.ID(),
- libvirt_uuid=domain.UUIDString())
- domain_name = domain.name()
-
- # TODO: add support for guests that are not in a running state
- state, _ = domain.state()
- # 2 is the state for a running guest
- if state != 1:
- continue
-
- hostvars['libvirt_status'] = 'running'
-
- root = ET.fromstring(domain.XMLDesc())
- ansible_ns = {'ansible': 'https://github.com/ansible/ansible'}
- for tag_elem in root.findall('./metadata/ansible:tags/ansible:tag', ansible_ns):
- tag = tag_elem.text
- _push(inventory, "tag_%s" % tag, domain_name)
- _push(hostvars, 'libvirt_tags', tag)
-
- # TODO: support more than one network interface, also support
- # interface types other than 'network'
- interface = root.find("./devices/interface[@type='network']")
- if interface is not None:
- source_elem = interface.find('source')
- mac_elem = interface.find('mac')
- if source_elem is not None and \
- mac_elem is not None:
- # Adding this to disable pylint check specifically
- # ignoring libvirt-python versions that
- # do not include DHCPLeases
- # This is needed until we upgrade the build bot to
- # RHEL7 (>= 1.2.6 libvirt)
- # pylint: disable=no-member
- dhcp_leases = conn.networkLookupByName(source_elem.get('network')) \
- .DHCPLeases(mac_elem.get('address'))
- if len(dhcp_leases) > 0:
- ip_address = dhcp_leases[0]['ipaddr']
- hostvars['ansible_ssh_host'] = ip_address
- hostvars['libvirt_ip_address'] = ip_address
-
- inventory['_meta']['hostvars'][domain_name] = hostvars
-
- return inventory
-
-def _push(my_dict, key, element):
- '''
- Push element to the my_dict[key] list.
- After having initialized my_dict[key] if it dosn't exist.
- '''
-
- if key in my_dict:
- my_dict[key].append(element)
- else:
- my_dict[key] = [element]
-
-def _json_format_dict(data, pretty=False):
- ''' Serialize data to a JSON formated str '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-LibvirtInventory()
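
The deleted libvirt script above builds its groups from `<ansible:tag>` elements stored in each domain's XML metadata. The following is a self-contained sketch of that lookup with a hand-written domain XML document; only the namespace URI is taken from the script.

```python
# Sketch of how the deleted libvirt_generic.py extracts Ansible tags from a
# domain's XML metadata; the XML below is an invented example document.
import xml.etree.ElementTree as ET

DOMAIN_XML = """
<domain type='kvm'>
  <name>vm1</name>
  <metadata>
    <ansible:tags xmlns:ansible='https://github.com/ansible/ansible'>
      <ansible:tag>master</ansible:tag>
      <ansible:tag>etcd</ansible:tag>
    </ansible:tags>
  </metadata>
</domain>
"""

root = ET.fromstring(DOMAIN_XML)
ns = {'ansible': 'https://github.com/ansible/ansible'}
tags = [el.text for el in root.findall('./metadata/ansible:tags/ansible:tag', ns)]
print(tags)  # ['master', 'etcd'] -> tag_master / tag_etcd inventory groups
```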
diff --git a/inventory/openstack/hosts/hosts b/inventory/openstack/hosts/hosts
deleted file mode 100644
index 9b63e98f4..000000000
--- a/inventory/openstack/hosts/hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost ansible_become=no ansible_python_interpreter='/usr/bin/env python2' connection=local
diff --git a/inventory/openstack/hosts/openstack.py b/inventory/openstack/hosts/openstack.py
deleted file mode 100755
index deefd3b5d..000000000
--- a/inventory/openstack/hosts/openstack.py
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/usr/bin/env python
-# pylint: skip-file
-
-# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
-# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
-# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2016, Rackspace Australia
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-
-# The OpenStack Inventory module uses os-client-config for configuration.
-# https://github.com/stackforge/os-client-config
-# This means it will either:
-# - Respect normal OS_* environment variables like other OpenStack tools
-# - Read values from a clouds.yaml file.
-# If you want to configure via clouds.yaml, you can put the file in:
-# - Current directory
-# - ~/.config/openstack/clouds.yaml
-# - /etc/openstack/clouds.yaml
-# - /etc/ansible/openstack.yml
-# The clouds.yaml file can contain entries for multiple clouds and multiple
-# regions of those clouds. If it does, this inventory module will connect to
-# all of them and present them as one contiguous inventory.
-#
-# See the adjacent openstack.yml file for an example config file
-# There are two ansible inventory specific options that can be set in
-# the inventory section.
-# expand_hostvars controls whether or not the inventory will make extra API
-# calls to fill out additional information about each server
-# use_hostnames changes the behavior from registering every host with its UUID
-# and making a group of its hostname to only doing this if the
-# hostname in question has more than one server
-# fail_on_errors causes the inventory to fail and return no hosts if one cloud
-# has failed (for example, bad credentials or being offline).
-# When set to False, the inventory will return hosts from
-# whichever other clouds it can contact. (Default: True)
-
-import argparse
-import collections
-import os
-import sys
-import time
-from distutils.version import StrictVersion
-
-try:
- import json
-except:
- import simplejson as json
-
-import os_client_config
-import shade
-import shade.inventory
-
-CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
-
-
-def get_groups_from_server(server_vars, namegroup=True):
- groups = []
-
- region = server_vars['region']
- cloud = server_vars['cloud']
- metadata = server_vars.get('metadata', {})
-
- # Create a group for the cloud
- groups.append(cloud)
-
- # Create a group on region
- groups.append(region)
-
- # And one by cloud_region
- groups.append("%s_%s" % (cloud, region))
-
- # Check if group metadata key in servers' metadata
- if 'group' in metadata:
- groups.append(metadata['group'])
-
- for extra_group in metadata.get('groups', '').split(','):
- if extra_group:
- groups.append(extra_group.strip())
-
- groups.append('instance-%s' % server_vars['id'])
- if namegroup:
- groups.append(server_vars['name'])
-
- for key in ('flavor', 'image'):
- if 'name' in server_vars[key]:
- groups.append('%s-%s' % (key, server_vars[key]['name']))
-
- for key, value in iter(metadata.items()):
- groups.append('meta-%s_%s' % (key, value))
-
- az = server_vars.get('az', None)
- if az:
- # Make groups for az, region_az and cloud_region_az
- groups.append(az)
- groups.append('%s_%s' % (region, az))
- groups.append('%s_%s_%s' % (cloud, region, az))
- return groups
-
-
-def get_host_groups(inventory, refresh=False):
- (cache_file, cache_expiration_time) = get_cache_settings()
- if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
- groups = to_json(get_host_groups_from_cloud(inventory))
- open(cache_file, 'w').write(groups)
- else:
- groups = open(cache_file, 'r').read()
- return groups
-
-
-def append_hostvars(hostvars, groups, key, server, namegroup=False):
- hostvars[key] = dict(
- ansible_ssh_host=server['interface_ip'],
- openstack=server)
- for group in get_groups_from_server(server, namegroup=namegroup):
- groups[group].append(key)
-
-
-def get_host_groups_from_cloud(inventory):
- groups = collections.defaultdict(list)
- firstpass = collections.defaultdict(list)
- hostvars = {}
- list_args = {}
- if hasattr(inventory, 'extra_config'):
- use_hostnames = inventory.extra_config['use_hostnames']
- list_args['expand'] = inventory.extra_config['expand_hostvars']
- if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"):
- list_args['fail_on_cloud_config'] = \
- inventory.extra_config['fail_on_errors']
- else:
- use_hostnames = False
-
- for server in inventory.list_hosts(**list_args):
-
- if 'interface_ip' not in server:
- continue
- firstpass[server['name']].append(server)
- for name, servers in firstpass.items():
- if len(servers) == 1 and use_hostnames:
- append_hostvars(hostvars, groups, name, servers[0])
- else:
- server_ids = set()
- # Trap for duplicate results
- for server in servers:
- server_ids.add(server['id'])
- if len(server_ids) == 1 and use_hostnames:
- append_hostvars(hostvars, groups, name, servers[0])
- else:
- for server in servers:
- append_hostvars(
- hostvars, groups, server['id'], server,
- namegroup=True)
- groups['_meta'] = {'hostvars': hostvars}
- return groups
-
-
-def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
- ''' Determines if cache file has expired, or if it is still valid '''
- if refresh:
- return True
- if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
- mod_time = os.path.getmtime(cache_file)
- current_time = time.time()
- if (mod_time + cache_expiration_time) > current_time:
- return False
- return True
-
-
-def get_cache_settings():
- config = os_client_config.config.OpenStackConfig(
- config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
- # For inventory-wide caching
- cache_expiration_time = config.get_cache_expiration_time()
- cache_path = config.get_cache_path()
- if not os.path.exists(cache_path):
- os.makedirs(cache_path)
- cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
- return (cache_file, cache_expiration_time)
-
-
-def to_json(in_dict):
- return json.dumps(in_dict, sort_keys=True, indent=2)
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
- parser.add_argument('--private',
- action='store_true',
- help='Use private address for ansible host')
- parser.add_argument('--refresh', action='store_true',
- help='Refresh cached information')
- parser.add_argument('--debug', action='store_true', default=False,
- help='Enable debug output')
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('--list', action='store_true',
- help='List active servers')
- group.add_argument('--host', help='List details about the specific host')
-
- return parser.parse_args()
-
-
-def main():
- args = parse_args()
- try:
- config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES
- shade.simple_logging(debug=args.debug)
- inventory_args = dict(
- refresh=args.refresh,
- config_files=config_files,
- private=args.private,
- )
- if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
- inventory_args.update(dict(
- config_key='ansible',
- config_defaults={
- 'use_hostnames': False,
- 'expand_hostvars': True,
- 'fail_on_errors': True,
- }
- ))
-
- inventory = shade.inventory.OpenStackInventory(**inventory_args)
-
- if args.list:
- output = get_host_groups(inventory, refresh=args.refresh)
- elif args.host:
- output = to_json(inventory.get_host(args.host))
- print(output)
- except shade.OpenStackCloudException as e:
- sys.stderr.write('%s\n' % e.message)
- sys.exit(1)
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
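
All four deleted inventory scripts share the same cache-expiry idea: a cache file is reused until its modification time plus a configured maximum age has passed. A small stand-alone sketch of that check, with an arbitrary example path and age:

```python
# Sketch of the mtime-based cache expiry check shared by the deleted ec2.py,
# gce.py and openstack.py scripts; the path and max age are example values.
import os
import time

def is_cache_stale(cache_file, cache_max_age, refresh=False):
    """Return True when the cache is missing, empty, or older than max age."""
    if refresh:
        return True
    if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
        mod_time = os.path.getmtime(cache_file)
        return (mod_time + cache_max_age) <= time.time()
    return True

print(is_cache_stale('/tmp/ansible-inventory.cache', 900))
```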
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 5ae1bf3d3..0692114d3 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.118.0%{?dist}
+Release: 0.123.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -280,6 +280,41 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.123.0
+-
+
+* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.122.0
+- Update openshift_hosted_routers example to be in ini format.
+ (abutcher@redhat.com)
+- Update calico to v2.5 (djosborne10@gmail.com)
+
+* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.121.0
+- Revert "logging set memory request to limit" (sdodson@redhat.com)
+- Move firewall install and fix scaleup playbooks (rteague@redhat.com)
+- Fix group conditional requirements (rteague@redhat.com)
+- Updating openshift_service_catalog to use oc_service over oc_obj to resolve
+ idempotency issues being seen from rerunning role (ewolinet@redhat.com)
+- annotate the infra projects for logging to fix bz1480988
+ (jcantril@redhat.com)
+- docker_image_availability: timeout skopeo inspect (lmeyer@redhat.com)
+- Fix scaleup on containerized installations (sdodson@redhat.com)
+- bug 1480878. Default pvc for logging (jcantril@redhat.com)
+- logging set memory request to limit (jcantril@redhat.com)
+- openshift_cfme: add nfs directory support (fsimonce@redhat.com)
+
+* Tue Aug 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.120.0
+- Nuage changes to add custom mounts for atomic-openshift-node service
+ (rohan.s.parulekar@nuagenetworks.net)
+- Add independent registry auth support (mgugino@redhat.com)
+- roles: use openshift_use_crio (gscrivan@redhat.com)
+- cri-o: change to system runc (gscrivan@redhat.com)
+- cri-o: rename openshift_docker_use_crio to openshift_use_crio
+ (gscrivan@redhat.com)
+- Remove unsupported playbooks and utilities (rteague@redhat.com)
+- Updating default tag for enterprise installation for ASB
+ (ewolinet@redhat.com)
+- Only validate certificates that are passed to oc_route (zgalor@redhat.com)
+
* Mon Aug 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.118.0
- Fix origin metrics and logging container version
(gevorg15@users.noreply.github.com)
diff --git a/playbooks/README.md b/playbooks/README.md
index 5857a9f59..290d4c082 100644
--- a/playbooks/README.md
+++ b/playbooks/README.md
@@ -12,8 +12,6 @@ And:
- [`adhoc`](adhoc) is a generic home for playbooks and tasks that are community
supported and not officially maintained.
-- [`aws`](aws), [`gce`](gce), [`libvirt`](libvirt) and [`openstack`](openstack)
- are related to the [`bin/cluster`](../bin) tool and its usage is deprecated.
Refer to the `README.md` file in each playbook directory for more information
about them.
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 410d98a9c..0fb29ca06 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -1,9 +1,5 @@
# AWS playbooks
-Parts of this playbook directory are meant to be driven by [`bin/cluster`](../../bin),
-which is community supported and use is considered **deprecated**.
-
-
## Provisioning
With recent desire for provisioning from customers and developers alike, the AWS
@@ -38,6 +34,7 @@ Before any provisioning may occur, AWS account credentials must be present in th
The newly added playbooks are the following:
- build_ami.yml
- provision.yml
+- provision_nodes.yml
The current expected work flow should be to provide the `vars.yml` file with the
desired settings for cluster instances. These settings are AWS specific and should
@@ -56,19 +53,6 @@ provision:
# when creating an encrypted AMI please specify use_encryption
use_encryption: False
- yum_repositories: # this is an example repository but it requires sslclient info. Use a valid yum repository for openshift rpms
- - name: openshift-repo
- file: openshift-repo
- description: OpenShift Builds
- baseurl: https://mirror.openshift.com/enterprise/online-int/latest/x86_64/os/
- enabled: yes
- gpgcheck: no
- sslverify: no
- # client cert and key required for this repository
- sslclientcert: "/var/lib/yum/client-cert.pem"
- sslclientkey: "/var/lib/yum/client-key.pem"
- gpgkey: "https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted"
-
# for s3 registry backend
openshift_registry_s3: True
@@ -127,40 +111,11 @@ provision:
```
Repeat the following setup for the infra and compute node groups. This most likely
- will not need editing but if further customization is required these parameters
+ will not need editing, but if the install requires further customization these parameters
can be updated.
#### Step 1
-Once the vars.yml file has been updated with the correct settings for the desired AWS account then we are ready to build an AMI.
-
-```
-$ ansible-playbook build_ami.yml
-```
-
-1. This script will build a VPC. Default name will be clusterid if not specified.
-2. Create an ssh key required for the instance.
-3. Create an instance.
-4. Run some setup roles to ensure packages and services are correctly configured.
-5. Create the AMI.
-6. If encryption is desired
- - A KMS key is created with the name of $clusterid
- - An encrypted AMI will be produced with $clusterid KMS key
-7. Terminate the instance used to configure the AMI.
-
-#### Step 2
-
-Now that we have created an AMI for our Openshift installation, that AMI id needs to be placed in the `vars.yml` file. To do so update the following fields (The AMI can be captured from the output of the previous step or found in the ec2 console under AMIs):
-
-```
- # when creating an encrypted AMI please specify use_encryption
- use_encryption: False # defaults to false
-```
-
-**Note**: If using encryption, specify with `use_encryption: True`. This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false. The AMI id will be fetched and used according to its most recent creation date.
-
-#### Step 3
-
Create an openshift-ansible inventory file to use for a byo installation. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
```ini
@@ -175,10 +130,20 @@ nodes
etcd
[OSEv3:vars]
-# cluster specific settings maybe be placed here
+################################################################################
+# Ensure these variables are set for bootstrap
+################################################################################
+openshift_master_bootstrap_enabled=True
+
openshift_hosted_router_wait=False
openshift_hosted_registry_wait=False
+# Repository for installation
+openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}]
+
+################################################################################
+# cluster specific settings may be placed here
+
[masters]
[etcd]
@@ -188,12 +153,46 @@ openshift_hosted_registry_wait=False
There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+In order to create the bootstrappable AMI we need an openshift-ansible inventory file. This file enables us to build the AMI using the openshift-ansible node roles.
+
+
+#### Step 2
+
+Once the vars.yml file has been updated with the correct settings for the desired AWS account, we are ready to build an AMI.
+
+```
+$ ansible-playbook -i inventory.yml build_ami.yml
+```
+
+1. This script will build a VPC. Default name will be clusterid if not specified.
+2. Create an ssh key required for the instance.
+3. Create an instance.
+4. Run some setup roles to ensure packages and services are correctly configured.
+5. Create the AMI.
+6. If encryption is desired
+ - A KMS key is created with the name of $clusterid
+ - An encrypted AMI will be produced with $clusterid KMS key
+7. Terminate the instance used to configure the AMI.
+
+
+#### Step 3
+
+Now that we have created an AMI for our OpenShift installation, that AMI id needs to be placed in the `vars.yml` file. To do so, update the following fields (the AMI id can be captured from the output of the previous step or found in the EC2 console under AMIs):
+
+```
+ # when creating an encrypted AMI please specify use_encryption
+ use_encryption: False # defaults to false
+```
+
+**Note**: If using encryption, specify it with `use_encryption: True`. This ensures the recently created AMI is encrypted for later use. If encryption is not desired, set the value to false. The AMI id will be fetched and used according to its most recent creation date.
+
+
#### Step 4
We are ready to create the master instances and install Openshift.
```
-$ ansible-playbook -i <inventory from step 3> provision.yml
+$ ansible-playbook -i <inventory from step 1> provision.yml
```
This playbook runs through the following steps:
diff --git a/playbooks/aws/openshift-cluster/add_nodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml
deleted file mode 100644
index 0e8eb90c1..000000000
--- a/playbooks/aws/openshift-cluster/add_nodes.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- vars:
- oo_extend_env: True
- tasks:
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
-- include: scaleup.yml
-- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index fa708ffa1..d27874200 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -60,24 +60,39 @@
timeout: 300
search_regex: OpenSSH
- - name: add host to group
+ - name: add host to nodes
add_host:
+ groups: nodes
name: "{{ amibase.tagged_instances.0.public_dns_name }}"
- groups: amibase
-- hosts: amibase
+ - name: set the user to perform installation
+ set_fact:
+ ansible_ssh_user: root
+
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/initialize_facts.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/initialize_openshift_repos.yml
+
+- hosts: nodes
remote_user: root
tasks:
- - name: included required variables
+ - name: get the necessary vars for ami building
include_vars: vars.yml
+ - set_fact:
+ openshift_node_bootstrap: True
+
- name: run openshift image preparation
include_role:
- name: openshift_ami_prep
- vars:
- r_openshift_ami_prep_yum_repositories: "{{ provision.build.yum_repositories }}"
- r_openshift_ami_prep_node: atomic-openshift-node
- r_openshift_ami_prep_master: atomic-openshift-master
+ name: openshift_node
- hosts: localhost
connection: local
@@ -90,6 +105,7 @@
state: present
description: "This was provisioned {{ ansible_date_time.iso8601 }}"
name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
+ tags: "{{ provision.build.openshift_ami_tags }}"
wait: yes
register: amioutput
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index c2f4dfedc..000000000
--- a/playbooks/aws/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
- | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
deleted file mode 100644
index 821a0f30e..000000000
--- a/playbooks/aws/openshift-cluster/config.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_public_hostname: "{{ ec2_ip_address }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_node_labels:
- region: "{{ deployment_vars[deployment_type].region }}"
- type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }}"
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
deleted file mode 100644
index 3edace493..000000000
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ etcd_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - add_host:
- name: "{{ master_names.0 }}"
- groups: service_master
- when: master_names is defined and master_names.0 is defined
-
-- include: update.yml
-- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
deleted file mode 100644
index ed8aac398..000000000
--- a/playbooks/aws/openshift-cluster/list.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=tag_clusterid_{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}"
- oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}"
- with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml
deleted file mode 100644
index 6fa9142a0..000000000
--- a/playbooks/aws/openshift-cluster/scaleup.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-
-- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars_files:
- - vars.yml
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ groups.nodes_to_add }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: ../../common/openshift-cluster/scaleup.yml
- vars_files:
- - ../../aws/openshift-cluster/vars.yml
- - ../../aws/openshift-cluster/cluster_hosts.yml
- vars:
- g_new_node_hosts: "{{ groups.nodes_to_add }}"
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml
deleted file mode 100644
index f7f4812bb..000000000
--- a/playbooks/aws/openshift-cluster/service.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: Call same systemctl command for openshift on all instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- tasks:
- - fail: msg="cluster_id is required to be injected in this playbook"
- when: cluster_id is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- groups: g_service_masters
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ master_hosts | default([]) }}"
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ node_hosts | default([]) }}"
-
-- include: ../../common/openshift-node/service.yml
-- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
deleted file mode 100644
index 608512b79..000000000
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ /dev/null
@@ -1,188 +0,0 @@
----
-- set_fact:
- created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
- docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
- cluster: "{{ cluster_id }}"
- env: "{{ cluster_env }}"
- host_type: "{{ type }}"
- sub_host_type: "{{ g_sub_host_type }}"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_master_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_master_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "master" and sub_host_type == "default"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_etcd_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_etcd_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "etcd" and sub_host_type == "default"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_infra_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_infra_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "node" and sub_host_type == "infra"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_node_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_node_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "node" and sub_host_type == "compute"
-
-- set_fact:
- ec2_instance_type: "{{ deployment_vars[deployment_type].type }}"
- when: ec2_instance_type is not defined
-- set_fact:
- ec2_security_groups: "{{ deployment_vars[deployment_type].security_groups }}"
- when: ec2_security_groups is not defined
-
-- name: Find amis for deployment_type
- ec2_ami_find:
- region: "{{ deployment_vars[deployment_type].region }}"
- ami_id: "{{ deployment_vars[deployment_type].image }}"
- name: "{{ deployment_vars[deployment_type].image_name }}"
- register: ami_result
-
-- fail: msg="Could not find requested ami"
- when: not ami_result.results
-
-- set_fact:
- latest_ami: "{{ ami_result.results | oo_ami_selector(deployment_vars[deployment_type].image_name) }}"
- volume_defs:
- etcd:
- root:
- volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}"
- device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}"
- master:
- root:
- volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
- device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
- docker:
- volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(10, true) }}"
- device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
- node:
- root:
- volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}"
- device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
- docker:
- volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}"
- device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
-
-- set_fact:
- volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}"
-
-- name: Launch instance(s)
- ec2:
- state: present
- region: "{{ deployment_vars[deployment_type].region }}"
- keypair: "{{ deployment_vars[deployment_type].keypair }}"
- group: "{{ deployment_vars[deployment_type].security_groups }}"
- instance_type: "{{ ec2_instance_type }}"
- image: "{{ deployment_vars[deployment_type].image }}"
- count: "{{ instances | length }}"
- vpc_subnet_id: "{{ deployment_vars[deployment_type].vpc_subnet }}"
- assign_public_ip: "{{ deployment_vars[deployment_type].assign_public_ip }}"
- user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
- wait: yes
- instance_tags:
- created-by: "{{ created_by }}"
- clusterid: "{{ cluster }}"
- environment: "{{ cluster_env }}"
- host-type: "{{ host_type }}"
- sub-host-type: "{{ sub_host_type }}"
- volumes: "{{ volumes }}"
- register: ec2
-
-- name: Add Name tag to instances
- ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
- args:
- tags:
- Name: "{{ item.0 }}"
-
-- set_fact:
- instance_groups: >
- tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }},
- tag_environment_{{ cluster_env }}, tag_host-type_{{ host_type }},
- tag_sub-host-type_{{ sub_host_type }}
-
-- set_fact:
- node_label:
- region: "{{ deployment_vars[deployment_type].region }}"
- type: "{{sub_host_type}}"
- when: host_type == "node"
-
-- set_fact:
- node_label:
- region: "{{ deployment_vars[deployment_type].region }}"
- type: "{{host_type}}"
- when: host_type != "node"
-
-- set_fact:
- logrotate:
- - name: syslog
- path: |
- /var/log/cron
- /var/log/maillog
- /var/log/messages
- /var/log/secure
- /var/log/spooler"
- options:
- - daily
- - rotate 7
- - compress
- - sharedscripts
- - missingok
- scripts:
- postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
-
-- name: Add new instances groups and variables
- add_host:
- hostname: "{{ item.0 }}"
- ansible_ssh_host: "{{ item.1.dns_name }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: "{{ instance_groups }}"
- ec2_private_ip_address: "{{ item.1.private_ip }}"
- ec2_ip_address: "{{ item.1.public_ip }}"
- ec2_tag_sub-host-type: "{{ sub_host_type }}"
- openshift_node_labels: "{{ node_label }}"
- logrotate_scripts: "{{ logrotate }}"
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
-
-- name: Add new instances to nodes_to_add group if needed
- add_host:
- hostname: "{{ item.0 }}"
- ansible_ssh_host: "{{ item.1.dns_name }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: nodes_to_add
- ec2_private_ip_address: "{{ item.1.private_ip }}"
- ec2_ip_address: "{{ item.1.public_ip }}"
- openshift_node_labels: "{{ node_label }}"
- logrotate_scripts: "{{ logrotate }}"
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
- when: oo_extend_env is defined and oo_extend_env | bool
-
-- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
- with_items: "{{ ec2.instances }}"
-
-- name: Wait for user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 10
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
deleted file mode 100644
index b1087f9c4..000000000
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-#cloud-config
-{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
-mounts:
-- [ xvdb ]
-- [ ephemeral0 ]
-{% endif %}
-
-write_files:
-{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
-- content: |
- DEVS=/dev/xvdb
- VG=docker_vg
- path: /etc/sysconfig/docker-storage-setup
- owner: root:root
- permissions: '0644'
-{% endif %}
-{% if deployment_vars[deployment_type].become | bool %}
-- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
- permissions: 440
- content: |
- Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty
-{% endif %}
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
deleted file mode 100644
index 1f15aa4bf..000000000
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,77 +0,0 @@
----
-- name: Terminate instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) }}"
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Remove tags from instances
- ec2_tag:
- resource: "{{ hostvars[item]['ec2_id'] }}"
- region: "{{ hostvars[item]['ec2_region'] }}"
- state: absent
- tags:
- environment: "{{ hostvars[item]['ec2_tag_environment'] }}"
- clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
- host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
- sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
- with_items: "{{ groups.oo_hosts_to_terminate }}"
- when: "'oo_hosts_to_terminate' in groups"
-
- - name: Terminate instances
- ec2:
- state: absent
- instance_ids: ["{{ hostvars[item].ec2_id }}"]
- region: "{{ hostvars[item].ec2_region }}"
- ignore_errors: yes
- register: ec2_term
- with_items: "{{ groups.oo_hosts_to_terminate }}"
- when: "'oo_hosts_to_terminate' in groups"
-
- # Fail if any of the instances failed to terminate with an error other
- # than 403 Forbidden
- - fail:
- msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
- when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: "{{ ec2_term.results }}"
-
- - name: Stop instance if termination failed
- ec2:
- state: stopped
- instance_ids: ["{{ item.item.ec2_id }}"]
- region: "{{ item.item.ec2_region }}"
- register: ec2_stop
- when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: "{{ ec2_term.results }}"
-
- - name: Rename stopped instances
- ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
- args:
- tags:
- Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
- with_items: "{{ ec2_stop.results }}"
- when: ec2_stop | changed
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
deleted file mode 100644
index ed05d61ed..000000000
--- a/playbooks/aws/openshift-cluster/update.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Update - Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Update - Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index b2b0716be..47da03cb7 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1,36 +1,4 @@
---
-debug_level: 2
-
-deployment_rhel7_ent_base:
- # rhel-7.1, requires cloud access subscription
- image: "{{ lookup('oo_option', 'ec2_image') | default('ami-10251c7a', True) }}"
- image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
- region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
- ssh_user: ec2-user
- become: yes
- keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
- type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
- security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
- vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
- assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
-
-deployment_vars:
- origin:
- # centos-7, requires marketplace
- image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}"
- image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
- region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
- ssh_user: centos
- become: yes
- keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
- type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
- security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
- vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
- assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
-
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
clusterid: mycluster
region: us-east-1
@@ -42,17 +10,6 @@ provision:
build: # build specific variables here
ami_name: "openshift-gi-"
base_image: ami-bdd5d6ab # base image for AMI to build from
- yum_repositories: # this is an example repository but it requires sslclient info
- - name: openshift-repo
- file: openshift-repo
- description: OpenShift Builds
- baseurl: https://mirror.openshift.com/enterprise/online-int/latest/x86_64/os/
- enabled: yes
- gpgcheck: no
- sslverify: no
- sslclientcert: "/var/lib/yum/client-cert.pem"
- sslclientkey: "/var/lib/yum/client-key.pem"
- gpgkey: "https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted"
# when creating an encrypted AMI please specify use_encryption
use_encryption: False
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index 64811e80d..e3ef704e5 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -15,6 +15,8 @@
when:
- (g_new_master_hosts | default([]) | length == 0) or (g_new_node_hosts | default([]) | length == 0)
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-master/scaleup.yml
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index fda89b1ea..0225623c6 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -14,6 +14,8 @@
when:
- g_new_node_hosts | default([]) | length == 0
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-node/scaleup.yml
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml
deleted file mode 100644
index 76246e7b0..000000000
--- a/playbooks/byo/vagrant.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: rhel_subscribe.yml
-
-- include: config.yml
diff --git a/playbooks/common/README.md b/playbooks/common/README.md
index 0b5e26989..968bd99cb 100644
--- a/playbooks/common/README.md
+++ b/playbooks/common/README.md
@@ -1,9 +1,8 @@
# Common playbooks
This directory has a generic set of playbooks that are included by playbooks in
-[`byo`](../byo), as well as other playbooks related to the
-[`bin/cluster`](../../bin) tool.
+[`byo`](../byo).
Note: playbooks in this directory use generic group names that do not line up
-with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks,
-requiring an explicit remapping of groups.
+with the groups used by the `byo` playbooks, requiring an explicit remapping of
+groups.
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index e1df71112..26b27ba39 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -18,10 +18,6 @@
- docker_image_availability
- docker_storage
-- include: initialize_firewall.yml
- tags:
- - always
-
- hosts: localhost
tasks:
- fail:
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c56b07037..a1ae14a1f 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -13,12 +13,12 @@
- name: Evaluate groups - g_master_hosts or g_new_master_hosts required
fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: g_master_hosts is not defined or g_new_master_hosts is not defined
+ when: g_master_hosts is not defined and g_new_master_hosts is not defined
- name: Evaluate groups - g_node_hosts or g_new_node_hosts required
fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: g_node_hosts is not defined or g_new_node_hosts is not defined
+ when: g_node_hosts is not defined and g_new_node_hosts is not defined
- name: Evaluate groups - g_lb_hosts required
fail:
@@ -117,7 +117,7 @@
add_host:
name: "{{ item }}"
groups: oo_etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
changed_when: False
- name: Evaluate oo_nodes_to_config
@@ -173,5 +173,5 @@
groups: oo_etcd_to_migrate
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
changed_when: no
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 65be436c6..9eaf3bc34 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -157,4 +157,4 @@
- name: initialize_facts set_fact on openshift_docker_hosted_registry_network
set_fact:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
diff --git a/playbooks/common/openshift-cluster/initialize_firewall.yml b/playbooks/common/openshift-cluster/initialize_firewall.yml
deleted file mode 100644
index f0374fbc7..000000000
--- a/playbooks/common/openshift-cluster/initialize_firewall.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Initialize host firewall
- hosts: oo_all_hosts
- tasks:
- - name: Install and configure the proper firewall settings
- include_role:
- name: os_firewall
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
deleted file mode 100644
index be956fca5..000000000
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: evaluate_groups.yml
-
-- name: Subscribe hosts, update repos and update OS packages
- hosts: oo_hosts_to_update
- roles:
- # Explicitly calling openshift_facts because it appears that when
- # rhel_subscribe is skipped that the openshift_facts dependency for
- # openshift_repos is also skipped (this is the case at least for Ansible
- # 2.0.2)
- - openshift_facts
- - role: rhel_subscribe
- when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
- - openshift_repos
- - os_update_latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 02b8a9d3c..7cc13137f 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -60,7 +60,7 @@
retries: 60
delay: 60
- - include: upgrade.yml
+ - include: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 83f16ac0d..83f16ac0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 808cc562c..808cc562c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 9d8b73cff..9d8b73cff 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index fbb355703..18f10437d 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -281,7 +281,7 @@
roles:
- openshift_facts
tasks:
- - include: docker/upgrade.yml
+ - include: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
index 0f6fb46a4..a241ef039 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -89,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index cfba788a8..54c85f0fb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 1054f430e..cee4e9087 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
index 783289c87..ae217ba2e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -89,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 8aa443c3c..d7cb38d03 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index 436795694..8531e6045 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index 9a000265e..a3d0d6305 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -89,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 2dd9676c7..5fee56615 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index d5fe8285e..e29d0f8e6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 8ceab09f4..51acd17da 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index f765e9064..9fe059ac9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 8bed6a8c2..1b10d4e37 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 4f05d0c64..9ec40723a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 2ef95e778..f97f34c3b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index abc4c245b..e95b90cd5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 2cb6197d1..f2b85eea1 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -3,6 +3,7 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
+ - role: os_firewall
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index 52b90daca..5f8bb1c7a 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -28,13 +28,15 @@
delay: 10
until: etcd_add_check.rc == 0
roles:
+ - role: os_firewall
+ when: etcd_add_check.rc == 0
- role: openshift_etcd
when: etcd_add_check.rc == 0
etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_initial_cluster_state: "existing"
- initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') }}"
+ initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
etcd_ca_setup: False
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
@@ -47,5 +49,7 @@
--ca-file {{ etcd_peer_ca_file }}
-C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
cluster-health
- retries: 1
+ register: scaleup_health
+ retries: 3
delay: 30
+ until: scaleup_health.rc == 0
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
deleted file mode 100644
index ced4bddc5..000000000
--- a/playbooks/common/openshift-etcd/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_etcd
- add_host:
- name: "{{ item }}"
- groups: g_service_etcd
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change etcd state on etcd instance(s)
- hosts: g_service_etcd
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=etcd state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index 2dacc1218..09ed81a83 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -14,4 +14,5 @@
+ openshift_loadbalancer_additional_backends | default([]) }}"
openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
+ - role: os_firewall
- role: openshift_loadbalancer
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
deleted file mode 100644
index d3762c961..000000000
--- a/playbooks/common/openshift-loadbalancer/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_lb
- add_host:
- name: "{{ item }}"
- groups: g_service_lb
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on lb instance(s)
- hosts: g_service_lb
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index b30450def..cd25dd211 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -180,6 +180,7 @@
| oo_collect('openshift.common.ip') | default([]) | join(',')
}}"
roles:
+ - role: os_firewall
- role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index bc61ee9bb..6ad4cde65 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -1,11 +1,4 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
- name: Update master count
hosts: oo_masters:!oo_masters_to_config
serial: 1
@@ -50,16 +43,6 @@
delay: 1
changed_when: false
-- name: Configure docker hosts
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
-
- name: Disable excluders
hosts: oo_masters_to_config
tags:
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
deleted file mode 100644
index 48a2731aa..000000000
--- a/playbooks/common/openshift-master/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- groups: g_service_masters
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on master instance(s)
- hosts: g_service_masters
- connection: ssh
- gather_facts: no
- tasks:
- - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
deleted file mode 100644
index b1e35e4b1..000000000
--- a/playbooks/common/openshift-nfs/service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Populate g_service_nfs host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nfs
- add_host:
- name: "{{ item }}"
- groups: g_service_nfs
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on nfs instance(s)
- hosts: g_service_nfs
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=nfs-server state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index ef7d54f9f..04c811c22 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -32,6 +32,7 @@
}}"
roles:
+ - role: os_firewall
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -47,6 +48,7 @@
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
+ - role: os_firewall
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
index 40da8990d..b1bbbb14c 100644
--- a/playbooks/common/openshift-node/scaleup.yml
+++ b/playbooks/common/openshift-node/scaleup.yml
@@ -1,32 +1,4 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
-- name: Gather and set facts for first master
- hosts: oo_first_master
- vars:
- openshift_master_count: "{{ groups.oo_masters | length }}"
- pre_tasks:
- - set_fact:
- openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
- when: openshift_master_default_subdomain is not defined
- roles:
- - openshift_master_facts
-
-- name: Configure docker hosts
- hosts: oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
-
- name: Disable excluders
hosts: oo_nodes_to_config
tags:
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
deleted file mode 100644
index 130a5416f..000000000
--- a/playbooks/common/openshift-node/service.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on node instance(s)
- hosts: g_service_nodes
- connection: ssh
- gather_facts: no
- tasks:
- - name: Change state on node instance(s)
- service:
- name: "{{ service_type }}-node"
- state: "{{ new_cluster_state }}"
diff --git a/playbooks/gce/README.md b/playbooks/gce/README.md
deleted file mode 100644
index 0514d6f50..000000000
--- a/playbooks/gce/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# GCE playbooks
-
-This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
-which is community supported and most use is considered deprecated.
diff --git a/playbooks/gce/openshift-cluster/add_nodes.yml b/playbooks/gce/openshift-cluster/add_nodes.yml
deleted file mode 100644
index 765e03fdc..000000000
--- a/playbooks/gce/openshift-cluster/add_nodes.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- vars:
- oo_extend_env: True
- tasks:
- - fail:
- msg: Deployment type not supported for gce provider yet
- when: deployment_type == 'enterprise'
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
-- include: scaleup.yml
-- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index e5f41382b..000000000
--- a/playbooks/gce/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
- | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
deleted file mode 100644
index 2625d4d05..000000000
--- a/playbooks/gce/openshift-cluster/config.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hostname: "{{ gce_private_ip }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/gce/openshift-cluster/filter_plugins b/playbooks/gce/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/gce/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
deleted file mode 100644
index 7532a678b..000000000
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - fail: msg="Deployment type not supported for gce provider yet"
- when: deployment_type == 'enterprise'
-
- - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ etcd_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
- gce_machine_type: "{{ lookup('env', 'gce_machine_etcd_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_etcd_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
-
- - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
- gce_machine_type: "{{ lookup('env', 'gce_machine_master_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_master_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - add_host:
- name: "{{ master_names.0 }}"
- groups: service_master
- when: master_names is defined and master_names.0 is defined
-
-- include: update.yml
-
-- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
deleted file mode 100644
index 34ab09533..000000000
--- a/playbooks/gce/openshift-cluster/list.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}"
- oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}"
- with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/gce/openshift-cluster/lookup_plugins b/playbooks/gce/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/gce/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/roles b/playbooks/gce/openshift-cluster/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/gce/openshift-cluster/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml
deleted file mode 100644
index 13b267976..000000000
--- a/playbooks/gce/openshift-cluster/service.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: Call same systemctl command for openshift on all instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- tasks:
- - fail: msg="cluster_id is required to be injected in this playbook"
- when: cluster_id is not defined
-
- - add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
-
- - add_host:
- name: "{{ item }}"
- groups: g_service_masters
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
-
-- include: ../../common/openshift-node/service.yml
-- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
deleted file mode 100644
index 65dd2b71e..000000000
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-- name: Launch instance(s)
- gce:
- instance_names: "{{ instances|join(',') }}"
- machine_type: "{{ gce_machine_type | default(deployment_vars[deployment_type].machine_type, true) }}"
- image: "{{ gce_machine_image | default(deployment_vars[deployment_type].image, true) }}"
- service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- project_id: "{{ lookup('env', 'gce_project_id') }}"
- zone: "{{ lookup('env', 'zone') }}"
- network: "{{ lookup('env', 'network') }}"
- subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}"
- # unsupported in 1.9.+
- #service_account_permissions: "datastore,logging-write"
- tags:
- - created-by-{{ lookup('env', 'LOGNAME') | regex_replace('[^a-z0-9]+', '') | default(cluster, true) }}
- - environment-{{ cluster_env }}
- - clusterid-{{ cluster_id }}
- - host-type-{{ type }}
- - sub-host-type-{{ g_sub_host_type }}
- metadata:
- startup-script: |
- #!/bin/bash
- echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}
-
- when: instances |length > 0
- register: gce
-
-- set_fact:
- node_label:
- # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
- region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
- type: "{{ g_sub_host_type }}"
- when: instances |length > 0 and type == "node"
-
-- set_fact:
- node_label:
- # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
- region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
- type: "{{ type }}"
- when: instances |length > 0 and type != "node"
-
-- name: Add new instances to groups and set variables needed
- add_host:
- hostname: "{{ item.name }}"
- ansible_ssh_host: "{{ item.public_ip }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
- gce_public_ip: "{{ item.public_ip }}"
- gce_private_ip: "{{ item.private_ip }}"
- openshift_node_labels: "{{ node_label }}"
- with_items: "{{ gce.instance_data | default([], true) }}"
-
-- name: Wait for ssh
- wait_for: port=22 host={{ item.public_ip }}
- with_items: "{{ gce.instance_data | default([], true) }}"
-
-- name: Wait for user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
- register: result
- until: result.rc == 0
- retries: 30
- delay: 5
- with_items: "{{ gce.instance_data | default([], true) }}"
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
deleted file mode 100644
index afe269b7c..000000000
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-- name: Terminate instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) }}"
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- vars_files:
- - vars.yml
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- name: Terminate instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - name: Terminate instances that were previously launched
- local_action:
- module: gce
- state: 'absent'
- name: "{{ item }}"
- service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- project_id: "{{ lookup('env', 'gce_project_id') }}"
- zone: "{{ lookup('env', 'zone') }}"
- with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"
- when: item is defined
-
-#- include: ../openshift-node/terminate.yml
-# vars:
-# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
-#
-#- include: ../openshift-master/terminate.yml
-# vars:
-# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
deleted file mode 100644
index 6d2af3d26..000000000
--- a/playbooks/gce/openshift-cluster/update.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
deleted file mode 100644
index 13c754c1e..000000000
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-debug_level: 2
-
-deployment_rhel7_ent_base:
- image: "{{ lookup('oo_option', 'image_name') | default('rhel-7', True) }}"
- machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
- ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- become: yes
-
-deployment_vars:
- origin:
- image: "{{ lookup('oo_option', 'image_name') | default('centos-7', True) }}"
- machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
- ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- become: yes
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/libvirt/README.md b/playbooks/libvirt/README.md
deleted file mode 100644
index 3ce46a76f..000000000
--- a/playbooks/libvirt/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# libvirt playbooks
-
-This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
-which is community supported and whose use is considered deprecated.
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index e5f41382b..000000000
--- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
- | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
deleted file mode 100644
index 569e00da2..000000000
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-# TODO: need to figure out a plan for setting hostname, currently the default
-# is localhost, so no hostname value (or public_hostname) value is getting
-# assigned
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/libvirt/openshift-cluster/filter_plugins b/playbooks/libvirt/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/libvirt/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
deleted file mode 100644
index 2475b9d6b..000000000
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- vars:
- image_url: "{{ deployment_vars[deployment_type].image.url }}"
- image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
- image_name: "{{ deployment_vars[deployment_type].image.name }}"
- image_compression: "{{ deployment_vars[deployment_type].image.compression }}"
- tasks:
- - include: tasks/configure_libvirt.yml
-
- - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ etcd_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
-- include: update.yml
-
-- include: list.yml
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
deleted file mode 100644
index 579cd7ac6..000000000
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: ""
- oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}"
- with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/libvirt/openshift-cluster/lookup_plugins b/playbooks/libvirt/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/libvirt/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/roles b/playbooks/libvirt/openshift-cluster/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/libvirt/openshift-cluster/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml
deleted file mode 100644
index 8bd24a8cf..000000000
--- a/playbooks/libvirt/openshift-cluster/service.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-# TODO: need to figure out a plan for setting hostname, currently the default
-# is localhost, so no hostname value (or public_hostname) value is getting
-# assigned
-
-- name: Call same systemctl command for openshift on all instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - fail: msg="cluster_id is required to be injected in this playbook"
- when: cluster_id is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: g_service_masters
- with_items: "{{ g_master_hosts | default([]) }}"
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: g_service_nodes
- with_items: "{{ g_node_hosts | default([]) }}"
-
-- include: ../../common/openshift-node/service.yml
-- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
deleted file mode 100644
index f237c1a60..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: configure_libvirt_storage_pool.yml
- when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined
-
-- include: configure_libvirt_network.yml
- when: libvirt_network is defined
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
deleted file mode 100644
index b42ca83af..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Create the libvirt network for OpenShift
- virt_net:
- name: '{{ libvirt_network }}'
- state: '{{ item }}'
- autostart: 'yes'
- xml: "{{ lookup('template', 'network.xml') }}"
- uri: '{{ libvirt_uri }}'
- with_items:
- - present
- - active
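(Aside: the deleted task above runs the virt_net module twice, once with state `present` to define the network from the network.xml template and once with state `active` to start it. As a quick manual check on the hypervisor, assuming the defaults from vars.yml, i.e. the `qemu:///system` URI and the `openshift-ansible` network name, something along these lines would do:)

    # confirm the network is defined, active and set to autostart
    virsh -c qemu:///system net-list --all
    # dump its XML and compare against the network.xml template
    virsh -c qemu:///system net-dumpxml openshift-ansible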
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
deleted file mode 100644
index 8685624ec..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Create libvirt storage directory for openshift
- file:
- dest: "{{ libvirt_storage_pool_path }}"
- state: directory
-
-# We need to set permissions on the directory and any items created under the directory, so we need to call the acl module with and without default set.
-- acl:
- default: '{{ item.default }}'
- entity: kvm
- etype: group
- name: "{{ libvirt_storage_pool_path }}"
- permissions: '{{ item.permissions }}'
- state: present
- with_items:
- - default: no
- permissions: x
- - default: yes
- permissions: rwx
-
-- name: Create the libvirt storage pool for OpenShift
- virt_pool:
- name: '{{ libvirt_storage_pool }}'
- state: '{{ item }}'
- autostart: 'yes'
- xml: "{{ lookup('template', 'storage-pool.xml') }}"
- uri: '{{ libvirt_uri }}'
- with_items:
- - present
- - active
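(Aside: the two-pass acl task above grants the kvm group traverse access on the pool directory and adds a default ACL so that volumes created inside it inherit rwx. A rough shell equivalent, shown only to illustrate what the module does and assuming the default pool path from vars.yml, is:)

    # non-default ACL: let the kvm group traverse the pool directory
    setfacl -m g:kvm:x "$HOME/libvirt-storage-pool-openshift-ansible"
    # default ACL: entries created inside the directory inherit rwx for the kvm group
    setfacl -d -m g:kvm:rwx "$HOME/libvirt-storage-pool-openshift-ansible"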
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
deleted file mode 100644
index 4df86effa..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ /dev/null
@@ -1,142 +0,0 @@
----
-# TODO: Add support for choosing base image based on deployment_type and os
-# wanted (os wanted needs support added in bin/cluster with sane defaults:
-# fedora/centos for origin, rhel for enterprise)
-
-# TODO: create a role to encapsulate some of this complexity, possibly also
-# create a module to manage the storage tasks, network tasks, and possibly
-# even handle the libvirt tasks to set metadata in the domain xml and be able
-# to create/query data about vms without having to use xml; the python libvirt
-# bindings look like a good candidate for this
-
-- name: Download Base Cloud image
- get_url:
- url: '{{ image_url }}'
- sha256sum: '{{ image_sha256 }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}'
- when: ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"]
- register: downloaded_image
-
-- name: Uncompress xz compressed base cloud image
- command: 'unxz -kf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
- args:
- creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
- when: image_compression in ["xz"] and downloaded_image.changed
-
-- name: Uncompress tgz compressed base cloud image
- command: 'tar zxvf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
- args:
- creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
- when: image_compression in ["tgz"] and downloaded_image.changed
-
-- name: Uncompress gzip compressed base cloud image
- command: 'gunzip {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
- args:
- creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
- when: image_compression in ["gz"] and downloaded_image.changed
-
-- name: Create the cloud-init config drive path
- file:
- dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
- state: directory
- with_items: '{{ instances }}'
-
-- name: Create the cloud-init config drive files
- template:
- src: '{{ item[1] }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
- with_nested:
- - '{{ instances }}'
- - [ user-data, meta-data ]
-
-- name: Check for genisoimage
- command: which genisoimage
- register: which_genisoimage
-
-- name: Create the cloud-init config drive
- command: "{{ 'genisoimage' if which_genisoimage.rc == 0 else 'mkisofs' }} -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data"
- args:
- chdir: "{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/"
- creates: "{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso"
- with_items: '{{ instances }}'
-
-- name: Refresh the libvirt storage pool for openshift
- command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
-
-- name: Create VM drives
- command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
- with_items: '{{ instances }}'
-
-- name: Create VM docker drives
- command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0'
- with_items: '{{ instances }}'
-
-- name: Create VMs
- virt:
- name: '{{ item }}'
- command: define
- xml: "{{ lookup('template', '../templates/domain.xml') }}"
- uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
-
-- name: Start VMs
- virt:
- name: '{{ item }}'
- state: running
- uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
-
-- name: Wait for the VMs to get an IP
- shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}'''
- register: nb_allocated_ips
- until: nb_allocated_ips.stdout == '{{ instances | length }}'
- retries: 60
- delay: 3
- when: instances | length != 0
-
-- name: Collect IP addresses of the VMs
- shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
- register: scratch_ip
- with_items: '{{ instances }}'
-
-- set_fact:
- ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}"
-
-- set_fact:
- node_label:
- type: "{{ g_sub_host_type }}"
- when: instances | length > 0 and type == "node"
-
-- set_fact:
- node_label:
- type: "{{ type }}"
- when: instances | length > 0 and type != "node"
-
-- name: Add new instances
- add_host:
- hostname: '{{ item.0 }}'
- ansible_ssh_host: '{{ item.1 }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
- openshift_node_labels: "{{ node_label }}"
- libvirt_ip_address: "{{ item.1 }}"
- with_together:
- - '{{ instances }}'
- - '{{ ips }}'
-
-- name: Wait for ssh
- wait_for:
- host: '{{ item }}'
- port: 22
- with_items: '{{ ips }}'
-
-- name: Wait for openshift user setup
- command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
- register: result
- until: result.rc == 0
- retries: 30
- delay: 1
- with_together:
- - '{{ instances }}'
- - '{{ ips }}'
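(Aside: the config-drive tasks above render `user-data` and `meta-data` for each instance and pack them into a small ISO labelled `cidata`; cloud-init's NoCloud datasource picks its configuration up from that volume on first boot. Done by hand for a single, hypothetical instance named `node-example`, the same step looks roughly like:)

    cd "$HOME/libvirt-storage-pool-openshift-ansible/node-example_configdrive"
    genisoimage -output ../node-example_cloud-init.iso -volid cidata -joliet -rock user-data meta-data
    # mkisofs is used as a drop-in fallback when genisoimage is not installed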
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
deleted file mode 100644
index 88504a5f6..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<domain type='kvm' id='8'>
- <name>{{ item }}</name>
- <memory unit='MiB'>{{ libvirt_instance_memory_mib }}</memory>
- <metadata xmlns:ansible="https://github.com/ansible/ansible">
- <ansible:tags>
- <ansible:tag>environment-{{ cluster_env }}</ansible:tag>
- <ansible:tag>clusterid-{{ cluster }}</ansible:tag>
- <ansible:tag>host-type-{{ type }}</ansible:tag>
- <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
- </ansible:tags>
- </metadata>
- <vcpu placement='static'>{{ libvirt_instance_vcpu }}</vcpu>
- <os>
- <type arch='x86_64' machine='pc'>hvm</type>
- <boot dev='hd'/>
- </os>
- <features>
- <acpi/>
- <apic/>
- <pae/>
- </features>
- <cpu mode='host-model'>
- <model fallback='allow'/>
- </cpu>
- <clock offset='utc'>
- <timer name='rtc' tickpolicy='catchup'/>
- <timer name='pit' tickpolicy='delay'/>
- <timer name='hpet' present='no'/>
- </clock>
- <on_poweroff>destroy</on_poweroff>
- <on_reboot>restart</on_reboot>
- <on_crash>restart</on_crash>
- <devices>
- <emulator>/usr/bin/qemu-system-x86_64</emulator>
- <disk type='file' device='disk'>
- <driver name='qemu' type='qcow2' discard='unmap'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
- <target dev='sda' bus='scsi'/>
- </disk>
- <disk type='file' device='disk'>
- <driver name='qemu' type='qcow2' discard='unmap'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'/>
- <target dev='sdb' bus='scsi'/>
- </disk>
- <disk type='file' device='cdrom'>
- <driver name='qemu' type='raw'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
- <target dev='sdc' bus='scsi'/>
- <readonly/>
- </disk>
- <controller type='scsi' model='virtio-scsi' />
- <interface type='network'>
- <source network='{{ libvirt_network }}'/>
- <model type='virtio'/>
- </interface>
- <serial type='pty'>
- <target port='0'/>
- </serial>
- <console type='pty'>
- <target type='serial' port='0'/>
- </console>
- <memballoon model='virtio'>
- </memballoon>
- </devices>
-</domain>
diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data
deleted file mode 100644
index 6b421770d..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/meta-data
+++ /dev/null
@@ -1,3 +0,0 @@
-instance-id: {{ item[0] }}
-hostname: {{ item[0] }}
-local-hostname: {{ item[0] }}.example.com
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
deleted file mode 100644
index 0ce2a8342..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<network>
- <name>{{ libvirt_network }}</name>
- <forward mode='nat'>
- <nat>
- <port start='1024' end='65535'/>
- </nat>
- </forward>
- <!-- TODO: query for first available virbr interface available -->
- <bridge name='virbr3' stp='on' delay='0'/>
- <!-- TODO: make overridable -->
- <domain name='example.com' localOnly='yes' />
- <dns>
- <!-- TODO: automatically add host entries -->
- </dns>
- <!-- TODO: query for available address space -->
- <ip address='192.168.55.1' netmask='255.255.255.0'>
- <dhcp>
- <range start='192.168.55.2' end='192.168.55.254'/>
- <!-- TODO: add static entries addresses for the hosts to be created -->
- </dhcp>
- </ip>
-</network>
-
diff --git a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
deleted file mode 100644
index da139afd0..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<pool type='dir'>
- <name>{{ libvirt_storage_pool }}</name>
- <target>
- <path>{{ libvirt_storage_pool_path }}</path>
- </target>
-</pool>
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
deleted file mode 100644
index fbcf7c886..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ /dev/null
@@ -1,43 +0,0 @@
-#cloud-config
-disable_root: true
-
-hostname: {{ item[0] }}
-fqdn: {{ item[0] }}.example.com
-
-mounts:
-- [ sdb ]
-
-users:
- - default
- - name: root
- ssh_authorized_keys:
- - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
-
-system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
-ssh_authorized_keys:
- - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
-
-write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: 440
- content: |
- Defaults:openshift !requiretty
- - path: /etc/sysconfig/docker-storage-setup
- owner: root:root
- permissions: '0644'
- content: |
- DEVS=/dev/sdb
- VG=docker_vg
- EXTRA_DOCKER_STORAGE_OPTIONS='--storage-opt dm.blkdiscard=true'
- - path: /etc/systemd/system/fstrim.timer.d/hourly.conf
- content: |
- [Timer]
- OnCalendar=hourly
-
-runcmd:
- - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
- - systemctl enable --now fstrim.timer
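(Aside: the docker-storage-setup stanza in the user-data above points docker's storage at the second disk, `/dev/sdb` (the `*-docker.qcow2` volume attached in domain.xml), building an LVM volume group named docker_vg. On a booted guest, a sanity check could look something like:)

    # volume group created by docker-storage-setup from /dev/sdb
    sudo vgs docker_vg
    # docker should report an LVM/devicemapper-backed storage driver
    sudo docker info | grep -i 'storage driver'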
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
deleted file mode 100644
index 8a63d11a5..000000000
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-# TODO: does not handle a non-existent cluster gracefully
-
-- name: Terminate instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: cluster_group=tag_clusterid-{{ cluster_id }}
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: '{{ groups[cluster_group] | default([]) }}'
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- vars_files:
- - vars.yml
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- name: Terminate instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - name: Destroy VMs
- virt:
- name: '{{ item[0] }}'
- command: '{{ item[1] }}'
- uri: '{{ libvirt_uri }}'
- with_nested:
- - "{{ groups['oo_hosts_to_terminate'] }}"
- - [ destroy, undefine ]
-
- - name: Delete VM drives
- command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
- args:
- removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-
- - name: Delete VM docker drives
- command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}-docker.qcow2'
- args:
- removes: '{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-
- - name: Delete the VM cloud-init image
- file:
- path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
- state: absent
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-
- - name: Remove the cloud-init config directory
- file:
- path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
- state: absent
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
deleted file mode 100644
index a152135fc..000000000
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: '{{ g_all_hosts }}'
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: '{{ g_all_hosts | default([]) }}'
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
deleted file mode 100644
index 5156789e7..000000000
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-default_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
-libvirt_storage_pool_path: "{{ lookup('oo_option', 'libvirt_storage_pool_path') | default(default_pool_path, True) }}"
-libvirt_storage_pool: "{{ lookup('oo_option', 'libvirt_storage_pool') | default('openshift-ansible', True) }}"
-libvirt_network: "{{ lookup('oo_option', 'libvirt_network') | default('openshift-ansible', True) }}"
-libvirt_instance_memory_mib: "{{ lookup('oo_option', 'libvirt_instance_memory_mib') | default(1024, True) }}"
-libvirt_instance_vcpu: "{{ lookup('oo_option', 'libvirt_instance_vcpu') | default(2, True) }}"
-libvirt_uri: "{{ lookup('oo_option', 'libvirt_uri') | default('qemu:///system', True) }}"
-debug_level: 2
-
-# Automatic download of the qcow2 image for RHEL cannot be done directly from the RedHat portal because it requires authentication.
-# The default value of image_url for enterprise and openshift-enterprise deployment types below won't work.
-deployment_rhel7_ent_base:
- image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
- sha256: "{{ lookup('oo_option', 'image_sha256') |
- default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
- compression: ""
- ssh_user: openshift
- become: yes
-
-deployment_vars:
- origin:
- image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
- compression: "{{ lookup('oo_option', 'image_compression') |
- default('xz', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
- sha256: "{{ lookup('oo_option', 'image_sha256') |
- default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
- ssh_user: openshift
- become: yes
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
deleted file mode 100644
index a6d8d6995..000000000
--- a/playbooks/openstack/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# OpenStack playbooks
-
-This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
-which is community supported and whose use is considered deprecated.
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index 12c9fd442..000000000
--- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([])
- | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
deleted file mode 100644
index f9ddb9469..000000000
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_nodeonmaster: true
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
deleted file mode 100644
index 82329eac1..000000000
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ /dev/null
@@ -1,508 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: OpenShift cluster
-
-parameters:
-
- cluster_env:
- type: string
- label: Cluster environment
- description: Environment of the cluster
-
- cluster_id:
- type: string
- label: Cluster ID
- description: Identifier of the cluster
-
- subnet_24_prefix:
- type: string
- label: subnet /24 prefix
- description: /24 subnet prefix of the network of the cluster (dot separated number triplet)
-
- dns_nameservers:
- type: comma_delimited_list
- label: DNS nameservers list
- description: List of DNS nameservers
-
- external_net:
- type: string
- label: External network
- description: Name of the external network
- default: external
-
- ssh_public_key:
- type: string
- label: SSH public key
- description: SSH public key
- hidden: true
-
- ssh_incoming:
- type: string
- label: Source of ssh connections
- description: Source of legitimate ssh connections
- default: 0.0.0.0/0
-
- node_port_incoming:
- type: string
- label: Source of node port connections
- description: Authorized sources targeting node ports
- default: 0.0.0.0/0
-
- num_etcd:
- type: number
- label: Number of etcd nodes
- description: Number of etcd nodes
-
- num_masters:
- type: number
- label: Number of masters
- description: Number of masters
-
- num_nodes:
- type: number
- label: Number of compute nodes
- description: Number of compute nodes
-
- num_infra:
- type: number
- label: Number of infrastructure nodes
- description: Number of infrastructure nodes
-
- etcd_image:
- type: string
- label: Etcd image
- description: Name of the image for the etcd servers
-
- master_image:
- type: string
- label: Master image
- description: Name of the image for the master servers
-
- node_image:
- type: string
- label: Node image
- description: Name of the image for the compute node servers
-
- infra_image:
- type: string
- label: Infra image
- description: Name of the image for the infra node servers
-
- etcd_flavor:
- type: string
- label: Etcd flavor
- description: Flavor of the etcd servers
-
- master_flavor:
- type: string
- label: Master flavor
- description: Flavor of the master servers
-
- node_flavor:
- type: string
- label: Node flavor
- description: Flavor of the compute node servers
-
- infra_flavor:
- type: string
- label: Infra flavor
- description: Flavor of the infra node servers
-
-outputs:
-
- etcd_names:
- description: Name of the etcds
- value: { get_attr: [ etcd, name ] }
-
- etcd_ips:
- description: IPs of the etcds
- value: { get_attr: [ etcd, private_ip ] }
-
- etcd_floating_ips:
- description: Floating IPs of the etcds
- value: { get_attr: [ etcd, floating_ip ] }
-
- master_names:
- description: Name of the masters
- value: { get_attr: [ masters, name ] }
-
- master_ips:
- description: IPs of the masters
- value: { get_attr: [ masters, private_ip ] }
-
- master_floating_ips:
- description: Floating IPs of the masters
- value: { get_attr: [ masters, floating_ip ] }
-
- node_names:
- description: Name of the nodes
- value: { get_attr: [ compute_nodes, name ] }
-
- node_ips:
- description: IPs of the nodes
- value: { get_attr: [ compute_nodes, private_ip ] }
-
- node_floating_ips:
- description: Floating IPs of the nodes
- value: { get_attr: [ compute_nodes, floating_ip ] }
-
- infra_names:
- description: Name of the nodes
- value: { get_attr: [ infra_nodes, name ] }
-
- infra_ips:
- description: IPs of the nodes
- value: { get_attr: [ infra_nodes, private_ip ] }
-
- infra_floating_ips:
- description: Floating IPs of the nodes
- value: { get_attr: [ infra_nodes, floating_ip ] }
-
-resources:
-
- net:
- type: OS::Neutron::Net
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
-
- subnet:
- type: OS::Neutron::Subnet
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-subnet
- params:
- cluster_id: { get_param: cluster_id }
- network: { get_resource: net }
- cidr:
- str_replace:
- template: subnet_24_prefix.0/24
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- dns_nameservers: { get_param: dns_nameservers }
-
- router:
- type: OS::Neutron::Router
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-router
- params:
- cluster_id: { get_param: cluster_id }
- external_gateway_info:
- network: { get_param: external_net }
-
- interface:
- type: OS::Neutron::RouterInterface
- properties:
- router_id: { get_resource: router }
- subnet_id: { get_resource: subnet }
-
- keypair:
- type: OS::Nova::KeyPair
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-keypair
- params:
- cluster_id: { get_param: cluster_id }
- public_key: { get_param: ssh_public_key }
-
- master-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-master-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id OpenShift cluster master
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: tcp
- port_range_min: 4001
- port_range_max: 4001
- - direction: ingress
- protocol: tcp
- port_range_min: 8443
- port_range_max: 8443
- - direction: ingress
- protocol: tcp
- port_range_min: 8444
- port_range_max: 8444
- - direction: ingress
- protocol: tcp
- port_range_min: 53
- port_range_max: 53
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- - direction: ingress
- protocol: tcp
- port_range_min: 8053
- port_range_max: 8053
- - direction: ingress
- protocol: udp
- port_range_min: 8053
- port_range_max: 8053
- - direction: ingress
- protocol: tcp
- port_range_min: 24224
- port_range_max: 24224
- - direction: ingress
- protocol: udp
- port_range_min: 24224
- port_range_max: 24224
- - direction: ingress
- protocol: tcp
- port_range_min: 2224
- port_range_max: 2224
- - direction: ingress
- protocol: udp
- port_range_min: 5404
- port_range_max: 5404
- - direction: ingress
- protocol: udp
- port_range_min: 5405
- port_range_max: 5405
- - direction: ingress
- protocol: tcp
- port_range_min: 9090
- port_range_max: 9090
-
- etcd-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-etcd-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id etcd cluster
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: tcp
- port_range_min: 2379
- port_range_max: 2379
- remote_mode: remote_group_id
- remote_group_id: { get_resource: master-secgrp }
- - direction: ingress
- protocol: tcp
- port_range_min: 2380
- port_range_max: 2380
- remote_mode: remote_group_id
-
- node-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-node-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id OpenShift cluster nodes
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: tcp
- port_range_min: 10250
- port_range_max: 10250
- remote_mode: remote_group_id
- - direction: ingress
- protocol: udp
- port_range_min: 4789
- port_range_max: 4789
- remote_mode: remote_group_id
- - direction: ingress
- protocol: tcp
- port_range_min: 30000
- port_range_max: 32767
- remote_ip_prefix: { get_param: node_port_incoming }
-
- infra-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-infra-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id OpenShift infrastructure cluster nodes
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 80
- port_range_max: 80
- - direction: ingress
- protocol: tcp
- port_range_min: 443
- port_range_max: 443
-
- etcd:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_etcd }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: etcd
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: etcd
- image: { get_param: etcd_image }
- flavor: { get_param: etcd_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: etcd-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
-
- masters:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_masters }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: master
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: master
- image: { get_param: master_image }
- flavor: { get_param: master_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: master-secgrp }
- - { get_resource: node-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
-
- compute_nodes:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_nodes }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-sub_host_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: node
- sub_host_type: compute
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: node
- subtype: compute
- image: { get_param: node_image }
- flavor: { get_param: node_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: node-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
-
- infra_nodes:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_infra }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-sub_host_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: node
- sub_host_type: infra
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: node
- subtype: infra
- image: { get_param: infra_image }
- flavor: { get_param: infra_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: node-secgrp }
- - { get_resource: infra-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
deleted file mode 100644
index 435139849..000000000
--- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: OpenShift cluster server
-
-parameters:
-
- name:
- type: string
- label: Name
- description: Name
-
- cluster_env:
- type: string
- label: Cluster environment
- description: Environment of the cluster
-
- cluster_id:
- type: string
- label: Cluster ID
- description: Identifier of the cluster
-
- type:
- type: string
- label: Type
- description: Type master or node
-
- subtype:
- type: string
- label: Sub-type
- description: Sub-type compute or infra for nodes, default otherwise
- default: default
-
- key_name:
- type: string
- label: Key name
- description: Key name of keypair
-
- image:
- type: string
- label: Image
- description: Name of the image
-
- flavor:
- type: string
- label: Flavor
- description: Name of the flavor
-
- net:
- type: string
- label: Net ID
- description: Net resource
-
- net_name:
- type: string
- label: Net name
- description: Net name
-
- subnet:
- type: string
- label: Subnet ID
- description: Subnet resource
-
- secgrp:
- type: comma_delimited_list
- label: Security groups
- description: Security group resources
-
- floating_network:
- type: string
- label: Floating network
- description: Network to allocate floating IP from
-
-outputs:
-
- name:
- description: Name of the server
- value: { get_attr: [ server, name ] }
-
- private_ip:
- description: Private IP of the server
- value:
- get_attr:
- - server
- - addresses
- - { get_param: net_name }
- - 0
- - addr
-
- floating_ip:
- description: Floating IP of the server
- value:
- get_attr:
- - server
- - addresses
- - { get_param: net_name }
- - 1
- - addr
-
-resources:
-
- server:
- type: OS::Nova::Server
- properties:
- name: { get_param: name }
- key_name: { get_param: key_name }
- image: { get_param: image }
- flavor: { get_param: flavor }
- networks:
- - port: { get_resource: port }
- user_data: { get_resource: config }
- user_data_format: RAW
- metadata:
- environment: { get_param: cluster_env }
- clusterid: { get_param: cluster_id }
- host-type: { get_param: type }
- sub-host-type: { get_param: subtype }
-
- port:
- type: OS::Neutron::Port
- properties:
- network: { get_param: net }
- fixed_ips:
- - subnet: { get_param: subnet }
- security_groups: { get_param: secgrp }
-
- floating-ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network: { get_param: floating_network }
- port_id: { get_resource: port }
-
- config:
- type: OS::Heat::CloudConfig
- properties:
- cloud_config:
- disable_root: true
-
- hostname: { get_param: name }
-
- system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
- write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: 440
- # content: Defaults:openshift !requiretty
- # Encoded in base64 to be sure that we do not forget the trailing newline or
- # sudo will not be able to parse that file
- encoding: b64
- content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==
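(Aside: per the inline comment, the base64 blob above is just `Defaults:openshift !requiretty` plus the trailing newline that sudo needs to parse the file, encoded so the newline cannot be lost. It can be reproduced, or decoded for verification, with:)

    echo 'Defaults:openshift !requiretty' | base64
    # RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==
    echo 'RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==' | base64 -d
    # Defaults:openshift !requiretty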
diff --git a/playbooks/openstack/openshift-cluster/filter_plugins b/playbooks/openstack/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/openstack/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
deleted file mode 100644
index c0bc12f55..000000000
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,191 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- # TODO: Write an Ansible module for dealing with HEAT stacks
- # Dealing with the outputs is currently terrible
-
- - name: Check OpenStack stack
- command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
- register: stack_show_result
- changed_when: false
- failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
-
- - set_fact:
- heat_stack_action: 'stack-create'
- when: stack_show_result.rc == 1
- - set_fact:
- heat_stack_action: 'stack-update'
- when: stack_show_result.rc == 0
-
- - name: Create or Update OpenStack Stack
- command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
- --timeout {{ openstack_heat_timeout }}
- -P cluster_env={{ cluster_env }}
- -P cluster_id={{ cluster_id }}
- -P subnet_24_prefix={{ openstack_subnet_24_prefix }}
- -P dns_nameservers={{ openstack_network_dns | join(",") }}
- -P external_net={{ openstack_network_external_net }}
- -P ssh_public_key="{{ openstack_ssh_public_key }}"
- -P ssh_incoming={{ openstack_ssh_access_from }}
- -P node_port_incoming={{ openstack_node_port_access_from }}
- -P num_etcd={{ num_etcd }}
- -P num_masters={{ num_masters }}
- -P num_nodes={{ num_nodes }}
- -P num_infra={{ num_infra }}
- -P etcd_image={{ deployment_vars[deployment_type].image }}
- -P master_image={{ deployment_vars[deployment_type].image }}
- -P node_image={{ deployment_vars[deployment_type].image }}
- -P infra_image={{ deployment_vars[deployment_type].image }}
- -P etcd_flavor={{ openstack_flavor["etcd"] }}
- -P master_flavor={{ openstack_flavor["master"] }}
- -P node_flavor={{ openstack_flavor["node"] }}
- -P infra_flavor={{ openstack_flavor["infra"] }}
- openshift-ansible-{{ cluster_id }}-stack'
- args:
- chdir: '{{ playbook_dir }}'
-
- - name: Wait for OpenStack Stack readiness
- shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
- register: stack_show_status_result
- until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
- retries: 30
- delay: 5
-
- - name: Display the stack resources
- command: 'heat resource-list openshift-ansible-{{ cluster_id }}-stack'
- register: stack_resource_list_result
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - name: Display the stack status
- command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
- register: stack_show_result
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - name: Delete the stack
- command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack'
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - fail:
- msg: |
-
- +--------------------------------------+
- | ^ |
- | /!\ Failed to create the heat stack |
- | /___\ |
- +--------------------------------------+
-
- Here is the list of stack resources and their status:
- {{ stack_resource_list_result.stdout }}
-
- Here is the status of the stack:
- {{ stack_show_result.stdout }}
-
- ^ Failed to create the heat stack
- /!\
- /___\ Please check the `stack_status_reason` line in the above array to know why.
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - name: Read OpenStack Stack outputs
- command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
- register: stack_show_result
-
- - set_fact:
- parsed_outputs: "{{ stack_show_result | oo_parse_heat_stack_outputs }}"
-
- - name: Add new etcd instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_etcd, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "etcd"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.etcd_names }}'
- - '{{ parsed_outputs.etcd_ips }}'
- - '{{ parsed_outputs.etcd_floating_ips }}'
-
- - name: Add new master instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_master, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "master"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.master_names }}'
- - '{{ parsed_outputs.master_ips }}'
- - '{{ parsed_outputs.master_floating_ips }}'
-
- - name: Add new node instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_compute, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "compute"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.node_names }}'
- - '{{ parsed_outputs.node_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
-
- - name: Add new infra instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_infra, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "infra"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.infra_names }}'
- - '{{ parsed_outputs.infra_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
-
- - name: Wait for ssh
- wait_for:
- host: '{{ item }}'
- port: 22
- with_flattened:
- - '{{ parsed_outputs.master_floating_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
-
- - name: Wait for user setup
- command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
- register: result
- until: result.rc == 0
- retries: 30
- delay: 1
- with_flattened:
- - '{{ parsed_outputs.master_floating_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
-
-- include: update.yml
-
-- include: list.yml
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
deleted file mode 100644
index 6c6f671be..000000000
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=meta-clusterid_{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}"
- oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}"
- with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}"
diff --git a/playbooks/openstack/openshift-cluster/lookup_plugins b/playbooks/openstack/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/openstack/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openstack/openshift-cluster/roles b/playbooks/openstack/openshift-cluster/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/openstack/openshift-cluster/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
deleted file mode 100644
index affb57117..000000000
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- name: Terminate instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ (groups['meta-environment_' ~ cluster_env]|default([])) | intersect(groups['meta-clusterid_' ~ cluster_id ]|default([])) }}"
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- vars_files:
- - vars.yml
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - name: Delete the OpenStack Stack
- command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack'
- register: stack_delete_result
- changed_when: stack_delete_result.rc == 0
- failed_when: stack_delete_result.rc != 0 and 'could not be found' not in stack_delete_result.stdout
-
- - name: Wait for the completion of the OpenStack Stack deletion
- shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
- when: stack_delete_result.changed
- register: stack_show_result
- until: stack_show_result.stdout != 'DELETE_IN_PROGRESS'
- retries: 60
- delay: 5
- failed_when: '"Stack not found" not in stack_show_result.stderr and
- stack_show_result.stdout != "DELETE_COMPLETE"'
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
deleted file mode 100644
index 6d2af3d26..000000000
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
deleted file mode 100644
index ba2855b73..000000000
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-# yamllint disable rule:colons
----
-debug_level: 2
-openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) |
- default('files/heat_stack.yaml', True) }}"
-openstack_subnet_24_prefix: "{{ lookup('oo_option', 'subnet_24_prefix' ) |
- default('192.168.' + ( ( 1048576 | random % 256 ) | string() ), True) }}"
-openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) |
- default('external', True) }}"
-openstack_network_dns: "{{ lookup('oo_option', 'dns' ) |
- default('8.8.8.8,8.8.4.4', True) | oo_split() }}"
-openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') |
- default('~/.ssh/id_rsa.pub', True)) }}"
-openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
- default('0.0.0.0/0', True) }}"
-openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
- default('0.0.0.0/0', True) }}"
-openstack_heat_timeout: "{{ lookup('oo_option', 'heat_timeout') |
- default('3', True) }}"
-openstack_flavor:
- etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
- master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
- infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
- node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}"
-
-deployment_rhel7_ent_base:
- image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}"
- ssh_user: openshift
- become: yes
-
-deployment_vars:
- origin:
- image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
- ssh_user: openshift
- become: yes
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index b1907f8cb..be73e8a73 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -11,4 +11,4 @@ calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/
calico_ipv4pool_ipip: "always"
calico_log_dir: "/var/log/calico"
-calico_node_image: "calico/node:v2.4.1"
+calico_node_image: "calico/node:v2.5.0"
diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml
index d40286aba..01a2b9529 100644
--- a/roles/calico_master/defaults/main.yaml
+++ b/roles/calico_master/defaults/main.yaml
@@ -3,5 +3,5 @@ kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconf
calicoctl_bin_dir: "/usr/local/bin/"
-calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.4.0/calicoctl"
+calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.5.0/calicoctl"
calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.7.0"
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 1f9ac5059..78c6671d8 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -7,8 +7,8 @@
- set_fact:
l_use_system_container: "{{ openshift.docker.use_system_container | default(False) }}"
- l_use_crio: "{{ openshift.docker.use_crio | default(False) }}"
- l_use_crio_only: "{{ openshift.docker.use_crio_only | default(False) }}"
+ l_use_crio: "{{ openshift_use_crio | default(False) }}"
+ l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}"
- name: Use Package Docker if Requested
include: package_docker.yml
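Note: the CRI-O switches are now read from plain inventory variables rather than the cached openshift.docker facts. A minimal sketch of how such a toggle might be supplied; the values are illustrative, only the variable names openshift_use_crio / openshift_use_crio_only come from the hunk above:

    [OSEv3:vars]
    # enable the CRI-O runtime alongside docker (illustrative values)
    openshift_use_crio=true
    openshift_use_crio_only=false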
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2
index eae1759ab..5b31932b1 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/docker/templates/crio.conf.j2
@@ -43,7 +43,7 @@ stream_port = "10010"
# This is a mandatory setting as this runtime will be the default one
# and will also be used for untrusted container workloads if
# runtime_untrusted_workload is not set.
-runtime = "/usr/libexec/crio/runc"
+runtime = "/usr/bin/runc"
# runtime_untrusted_workload is the OCI compatible runtime used for untrusted
# container workloads. This is an optional setting, except if
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index d12d7a358..3cc2bbb18 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -1,6 +1,6 @@
---
-r_etcd_firewall_enabled: True
-r_etcd_use_firewalld: False
+r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-cluster-1
diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd_migrate/tasks/migrate.yml
index 173de77f4..54a9c74ff 100644
--- a/roles/etcd_migrate/tasks/migrate.yml
+++ b/roles/etcd_migrate/tasks/migrate.yml
@@ -21,15 +21,24 @@
lineinfile:
line: "ETCD_FORCE_NEW_CLUSTER=true"
dest: /etc/etcd/etcd.conf
+ backup: true
- name: Start etcd
systemd:
name: "{{ l_etcd_service }}"
state: started
+- name: Wait for cluster to become healthy after bringing up first member
+ command: >
+ etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
+ register: l_etcd_migrate_health
+ until: l_etcd_migrate_health.rc == 0
+ retries: 3
+ delay: 30
- name: Unset ETCD_FORCE_NEW_CLUSTER=true on first etcd host
lineinfile:
line: "ETCD_FORCE_NEW_CLUSTER=true"
dest: /etc/etcd/etcd.conf
state: absent
+ backup: true
- name: Restart first etcd host
systemd:
name: "{{ l_etcd_service }}"
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index d6db75e1e..8f8e46e1e 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1665,9 +1665,6 @@ class OCRoute(OpenShiftCLI):
@staticmethod
def get_cert_data(path, content):
'''get the data for a particular value'''
- if not path and not content:
- return None
-
rval = None
if path and os.path.exists(path) and os.access(path, os.R_OK):
rval = open(path).read()
@@ -1706,14 +1703,14 @@ class OCRoute(OpenShiftCLI):
if params['tls_termination'] and params['tls_termination'].lower() != 'passthrough': # E501
for key, option in files.items():
- if key == 'destcacert' and params['tls_termination'] != 'reencrypt':
+ if not option['path'] and not option['content']:
continue
option['value'] = OCRoute.get_cert_data(option['path'], option['content']) # E501
if not option['value']:
return {'failed': True,
- 'msg': 'Verify that you pass a value for %s' % key}
+ 'msg': 'Verify that you pass a correct value for %s' % key}
rconfig = RouteConfig(params['name'],
params['namespace'],
diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py
index 3935525f1..3a1bd732f 100644
--- a/roles/lib_openshift/src/class/oc_route.py
+++ b/roles/lib_openshift/src/class/oc_route.py
@@ -68,9 +68,6 @@ class OCRoute(OpenShiftCLI):
@staticmethod
def get_cert_data(path, content):
'''get the data for a particular value'''
- if not path and not content:
- return None
-
rval = None
if path and os.path.exists(path) and os.access(path, os.R_OK):
rval = open(path).read()
@@ -109,14 +106,14 @@ class OCRoute(OpenShiftCLI):
if params['tls_termination'] and params['tls_termination'].lower() != 'passthrough': # E501
for key, option in files.items():
- if key == 'destcacert' and params['tls_termination'] != 'reencrypt':
+ if not option['path'] and not option['content']:
continue
option['value'] = OCRoute.get_cert_data(option['path'], option['content']) # E501
if not option['value']:
return {'failed': True,
- 'msg': 'Verify that you pass a value for %s' % key}
+ 'msg': 'Verify that you pass a correct value for %s' % key}
rconfig = RouteConfig(params['name'],
params['namespace'],
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index 60247c33e..e68ae74bd 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,7 +1,7 @@
---
- name: restart node
become: yes
- systemd: name={{ openshift.common.service_type }}-node state=restarted
+ systemd: name={{ openshift.common.service_type }}-node daemon-reload=yes state=restarted
- name: save iptable rules
become: yes
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index 3764681ff..9db9dbb6a 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -33,6 +33,11 @@
- include: certificates.yml
+- name: Add additional Docker mounts for Nuage for atomic hosts
+ become: yes
+ lineinfile: dest="{{ openshift_atomic_node_config_file }}" line="{{ nuage_atomic_docker_additional_mounts }}"
+ when: openshift.common.is_atomic | bool
+
- name: Restart node services
command: /bin/true
notify:
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index 4cf68411f..d8bfca62a 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -23,3 +23,5 @@ cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
nuage_plugin_crt_dir: /usr/share/vsp-openshift
+openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node
+nuage_atomic_docker_additional_mounts: "DOCKER_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
diff --git a/roles/openshift_ami_prep/defaults/main.yml b/roles/openshift_ami_prep/defaults/main.yml
deleted file mode 100644
index 2ba6d8eae..000000000
--- a/roles/openshift_ami_prep/defaults/main.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-
-
-r_openshift_ami_prep_packages:
-- atomic-openshift-master
-- atomic-openshift-node
-- atomic-openshift-docker-excluder
-- atomic-openshift-sdn-ovs
-- openvswitch
-- docker
-- etcd
-#- pcs
-- haproxy
-- dnsmasq
-- ntp
-- logrotate
-- httpd-tools
-- bind
-- firewalld
-- libselinux-python
-- conntrack-tools
-- openssl
-- cloud-init
-- iproute
-- python-dbus
-- PyYAML
-- yum-utils
-- python2-boto
-- python2-boto3
-- cloud-utils-growpart
-# gluster
-- glusterfs-fuse
-- heketi-client
-# nfs
-- nfs-utils
-- flannel
-- bash-completion
-# cockpit
-- cockpit-ws
-- cockpit-system
-- cockpit-bridge
-- cockpit-docker
-# iscsi
-- iscsi-initiator-utils
-# ceph
-- ceph-common
-# systemcontainer
-# - runc
-# - container-selinux
-# - atomic
diff --git a/roles/openshift_ami_prep/tasks/main.yml b/roles/openshift_ami_prep/tasks/main.yml
deleted file mode 100644
index 98f7bc0e2..000000000
--- a/roles/openshift_ami_prep/tasks/main.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-- name: install repositories
- include: yum_repos.yml
- static: yes
-
-- name: install needed rpm(s)
- package:
- name: "{{ item }}"
- state: present
- with_items: "{{ r_openshift_ami_prep_packages }}"
-
-- name: create the directory for node
- file:
- state: directory
- path: "/etc/systemd/system/{{ r_openshift_ami_prep_node }}.service.d"
-
-- name: laydown systemd override
- copy:
- dest: "/etc/systemd/system/{{ r_openshift_ami_prep_node }}.service.d/override.conf"
- content: |
- [Unit]
- After=cloud-init.service
-
-- name: update the sysconfig to have KUBECONFIG
- lineinfile:
- dest: "/etc/sysconfig/{{ r_openshift_ami_prep_node }}"
- line: "KUBECONFIG=/root/csr_kubeconfig"
- regexp: "^KUBECONFIG=.*"
-
-- name: update the ExecStart to have bootstrap
- lineinfile:
- dest: "/usr/lib/systemd/system/{{ r_openshift_ami_prep_node }}.service"
- line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
- regexp: "^ExecStart=.*"
-
-- name: systemctl enable origin-node
- systemd:
- name: "{{ item }}"
- enabled: no
- with_items:
- - "{{ r_openshift_ami_prep_node }}.service"
- - "{{ r_openshift_ami_prep_master }}.service"
diff --git a/roles/openshift_ami_prep/tasks/yum_repos.yml b/roles/openshift_ami_prep/tasks/yum_repos.yml
deleted file mode 100644
index c48c67ac2..000000000
--- a/roles/openshift_ami_prep/tasks/yum_repos.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Create our install repository
- yum_repository:
- description: "{{ item.description | default(omit) }}"
- name: "{{ item.name }}"
- baseurl: "{{ item.baseurl }}"
- gpgkey: "{{ item.gpgkey | default(omit)}}"
- gpgcheck: "{{ item.gpgcheck | default(1) }}"
- sslverify: "{{ item.sslverify | default(1) }}"
- sslclientkey: "{{ item.sslclientkey | default(omit) }}"
- sslclientcert: "{{ item.sslclientcert | default(omit) }}"
- file: "{{ item.file }}"
- enabled: "{{ item.enabled }}"
- with_items: "{{ r_openshift_ami_prep_yum_repositories }}"
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
index 393bee1f3..8aa57e75a 100644
--- a/roles/openshift_cfme/defaults/main.yml
+++ b/roles/openshift_cfme/defaults/main.yml
@@ -29,6 +29,7 @@ openshift_cfme_pv_data:
openshift_cfme_maxImagesBulkImportedPerRepository: 100
# Hostname/IP of the NFS server. Currently defaults to first master
openshift_cfme_nfs_server: "{{ groups.nfs.0 }}"
+openshift_cfme_nfs_directory: "/exports"
# TODO: Refactor '_install_app' variable. This is just for testing but
# maybe in the future it should control the entire yes/no for CFME.
#
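Note: the NFS export base used by the CFME persistent volume templates is now configurable instead of being hard-coded to /exports. A minimal sketch of overriding it, with a hypothetical export path; only openshift_cfme_nfs_directory comes from the hunk above:

    # inventory or group_vars override (illustrative path)
    openshift_cfme_nfs_directory: /srv/nfs/exports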
diff --git a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
index b8c3bb277..280f3e97a 100644
--- a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
+++ b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
@@ -8,6 +8,6 @@ spec:
accessModes:
- ReadWriteOnce
nfs:
- path: /exports/miq-pv01
+ path: {{ openshift_cfme_nfs_directory }}/miq-pv01
server: {{ openshift_cfme_nfs_server }}
persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
index 7218773f0..fe80dffa5 100644
--- a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
+++ b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
@@ -8,6 +8,6 @@ spec:
accessModes:
- ReadWriteOnce
nfs:
- path: /exports/miq-pv02
+ path: {{ openshift_cfme_nfs_directory }}/miq-pv02
server: {{ openshift_cfme_nfs_server }}
persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
index 7b40b6c69..f84b67ea9 100644
--- a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
+++ b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
@@ -8,6 +8,6 @@ spec:
accessModes:
- ReadWriteOnce
nfs:
- path: /exports/miq-pv03
+ path: {{ openshift_cfme_nfs_directory }}/miq-pv03
server: {{ openshift_cfme_nfs_server }}
persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index c716a0860..9e61805f9 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,6 @@
---
- set_fact:
- l_use_crio: "{{ openshift_docker_use_crio | default(false) }}"
+ l_use_crio: "{{ openshift_use_crio | default(false) }}"
- name: Install clients
package: name={{ openshift.common.service_type }}-clients state=present
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 516d7dc29..334150f63 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -17,7 +17,7 @@
hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}"
hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}"
use_system_container: "{{ openshift_docker_use_system_container | default(False) }}"
- use_crio: "{{ openshift_docker_use_crio | default(False) }}"
+ use_crio: "{{ openshift_use_crio | default(False) }}"
- role: node
local_facts:
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 85a922f86..857a80c74 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -168,7 +168,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
registries = [registry]
for registry in registries:
- args = {"_raw_params": "skopeo inspect --tls-verify=false docker://{}/{}".format(registry, image)}
+ args = {
+ "_raw_params": "timeout 10 skopeo inspect --tls-verify=false "
+ "docker://{}/{}".format(registry, image)
+ }
result = self.execute_module("command", args)
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
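Note: the image availability probe is now bounded by a 10 second timeout so an unreachable registry cannot hang the health check. A sketch of the effective command expressed as an Ansible task, with a hypothetical registry and image:

    - name: probe image availability with a bounded wait
      command: timeout 10 skopeo inspect --tls-verify=false docker://registry.example.com/openshift3/ose-pod:v3.6.0
      register: skopeo_probe
      changed_when: false
      failed_when: false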
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index f3747eead..c26df3afa 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -5,8 +5,8 @@ r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default
r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-openshift_hosted_router_wait: True
-openshift_hosted_registry_wait: True
+openshift_hosted_router_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
+openshift_hosted_registry_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
registry_volume_claim: 'registry-claim'
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
new file mode 100644
index 000000000..d4b33616a
--- /dev/null
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -0,0 +1,17 @@
+---
+- oc_obj:
+ state: list
+ kind: project
+ name: "{{ item }}"
+ with_items: "{{ __default_logging_ops_projects }}"
+ register: __logging_ops_projects
+
+- name: Annotate Operations Projects
+ oc_edit:
+ kind: ns
+ name: "{{ item.item }}"
+ separator: '#'
+ content:
+ metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}"
+ with_items: "{{ __logging_ops_projects.results }}"
+ when: "{{ item.results.stderr is not defined }}"
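Note: the new task annotates each default operations project so the logging UI can link users to the ops Kibana instance. A sketch of what an annotated namespace would carry afterwards, assuming a hypothetical ops hostname:

    # resulting project metadata (illustrative hostname)
    metadata:
      annotations:
        openshift.io/logging.ui.hostname: kibana-ops.example.com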
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index f8553be79..a77df9986 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -216,6 +216,7 @@
when:
- openshift_logging_use_ops | bool
+- include: annotate_ops_projects.yaml
## Curator
- include_role:
diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml
index e561b41e2..01809fddf 100644
--- a/roles/openshift_logging/vars/main.yaml
+++ b/roles/openshift_logging/vars/main.yaml
@@ -6,3 +6,5 @@ es_ops_node_quorum: "{{ (openshift_logging_es_ops_cluster_size | int/2 | round(0
es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size | int}}"
es_log_appenders: ['file', 'console']
+
+__default_logging_ops_projects: ['default', 'openshift', 'openshift-infra', 'kube-system']
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 931846fdb..1e800b1d6 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -229,7 +229,7 @@
dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
vars:
obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
@@ -243,7 +243,7 @@
dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
vars:
obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
when:
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index cbc879d31..d70106276 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -19,3 +19,8 @@ r_openshift_master_os_firewall_allow:
- service: etcd embedded
port: 4001/tcp
cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+
+oreg_url: ''
+oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker"
+oreg_auth_credentials_replace: False
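Note: oreg_host is derived from the first path segment of oreg_url and is only treated as a registry host when that segment contains a dot. Illustrative values (only the variable names come from the hunk above):

    # oreg_url: registry.example.com/openshift3/ose-${component}:${version}
    #   -> oreg_host: registry.example.com   (first segment contains a dot)
    # oreg_url: openshift3/ose-${component}:${version}
    #   -> oreg_host: ''                     (no dot, default registry assumed)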
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 173b27ce0..a06defdb9 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -229,6 +229,22 @@
- restart master controllers
when: openshift_master_bootstrap_enabled | default(False)
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{oreg_auth_credentials_path }}"
+ when:
+ - oreg_auth_user is defined
+ register: master_oreg_auth_credentials_stat
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart master api
+ - restart master controllers
+
- include: set_loopback_context.yml
when:
- openshift.common.version_gte_3_2_or_1_2
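Note: when registry credentials are supplied, the new tasks run docker login against oreg_host once per master and store the config under oreg_auth_credentials_path. A minimal inventory sketch with placeholder credentials; only the variable names come from the hunks above:

    [OSEv3:vars]
    oreg_url=registry.example.com/openshift3/ose-${component}:${version}
    oreg_auth_user=example-user
    oreg_auth_password=example-password
    # re-run the login even if a docker config already exists
    oreg_auth_credentials_replace=true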
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index c7867d225..a7dad5b1f 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,6 +1,64 @@
---
r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+openshift_service_type: "{{ openshift.common.service_type }}"
+
+openshift_image_tag: ''
+
+openshift_node_ami_prep_packages:
+- "{{ openshift_service_type }}-master"
+- "{{ openshift_service_type }}-node"
+- "{{ openshift_service_type }}-docker-excluder"
+- "{{ openshift_service_type }}-sdn-ovs"
+- ansible
+- openvswitch
+- docker
+- etcd
+#- pcs
+- haproxy
+- dnsmasq
+- ntp
+- logrotate
+- httpd-tools
+- bind
+- firewalld
+- libselinux-python
+- conntrack-tools
+- openssl
+- cloud-init
+- iproute
+- python-dbus
+- PyYAML
+- yum-utils
+- python2-boto
+- python2-boto3
+- cloud-utils-growpart
+# gluster
+- glusterfs-fuse
+- heketi-client
+# nfs
+- nfs-utils
+- flannel
+- bash-completion
+# cockpit
+- cockpit-ws
+- cockpit-system
+- cockpit-bridge
+- cockpit-docker
+# iscsi
+- iscsi-initiator-utils
+# ceph
+- ceph-common
+# systemcontainer
+# - runc
+# - container-selinux
+# - atomic
+#
+openshift_deployment_type: origin
+
+openshift_node_bootstrap: False
+
r_openshift_node_os_firewall_deny: []
r_openshift_node_os_firewall_allow:
- service: Kubernetes kubelet
@@ -21,3 +79,8 @@ r_openshift_node_os_firewall_allow:
- service: Kubernetes service NodePort UDP
port: "{{ openshift_node_port_range | default('') }}/udp"
cond: "{{ openshift_node_port_range is defined }}"
+
+oreg_url: ''
+oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker"
+oreg_auth_credentials_replace: False
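Note: openshift_node_bootstrap switches the role between the regular node configuration and the new AMI/bootstrap preparation path (see bootstrap.yml below). A hedged sketch of a play that might flip it; the play itself is illustrative, only the variable name comes from the hunk above:

    - hosts: nodes
      roles:
      - role: openshift_node
        openshift_node_bootstrap: True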
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index f2c45a4bd..14ba48aba 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -27,6 +27,7 @@
when:
- (not skip_node_svc_handlers | default(False) | bool)
- not (node_service_status_changed | default(false) | bool)
+ - not openshift_node_bootstrap
- name: reload sysctl.conf
command: /sbin/sysctl -p
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 06373de04..3db980514 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -19,6 +19,7 @@ dependencies:
- role: openshift_clock
- role: openshift_docker
- role: openshift_node_certificates
+ when: not openshift_node_bootstrap
- role: openshift_cloud_provider
- role: openshift_node_dnsmasq
when: openshift.common.use_dnsmasq | bool
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
new file mode 100644
index 000000000..cb1440283
--- /dev/null
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -0,0 +1,55 @@
+---
+- name: install needed rpm(s)
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ openshift_node_ami_prep_packages }}"
+
+- name: create the directory for node
+ file:
+ state: directory
+ path: "/etc/systemd/system/{{ openshift_service_type }}-node.service.d"
+
+- name: laydown systemd override
+ copy:
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service.d/override.conf"
+ content: |
+ [Unit]
+ After=cloud-init.service
+
+- name: update the sysconfig to have KUBECONFIG
+ lineinfile:
+ dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
+ line: "KUBECONFIG=/root/csr_kubeconfig"
+ regexp: "^KUBECONFIG=.*"
+
+- name: update the ExecStart to have bootstrap
+ lineinfile:
+ dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service"
+ line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
+ regexp: "^ExecStart=.*"
+
+- name: "systemctl enable {{ openshift_service_type }}-node"
+ systemd:
+ name: "{{ item }}"
+ enabled: no
+ with_items:
+ - "{{ openshift_service_type }}-node.service"
+ - "{{ openshift_service_type }}-master.service"
+
+- name: Check for RPM generated config marker file .config_managed
+ stat:
+ path: /etc/origin/.config_managed
+ register: rpmgenerated_config
+
+- name: Remove RPM generated config files if present
+ file:
+ path: "/etc/origin/{{ item }}"
+ state: absent
+ when:
+ - rpmgenerated_config.stat.exists
+ - openshift_deployment_type in ['openshift-enterprise', 'atomic-enterprise']
+ with_items:
+ - master
+ - node
+ - .config_managed
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
new file mode 100644
index 000000000..8210fd881
--- /dev/null
+++ b/roles/openshift_node/tasks/config.yml
@@ -0,0 +1,111 @@
+---
+- name: Install the systemd units
+ include: systemd_units.yml
+
+- name: Check for tuned package
+ command: rpm -q tuned
+ args:
+ warn: no
+ register: tuned_installed
+ changed_when: false
+ failed_when: false
+
+- name: Set atomic-guest tuned profile
+ command: "tuned-adm profile atomic-guest"
+ when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool
+
+- name: Start and enable openvswitch service
+ systemd:
+ name: openvswitch.service
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ when:
+ - openshift.common.is_containerized | bool
+ - openshift.common.use_openshift_sdn | default(true) | bool
+ register: ovs_start_result
+ until: not ovs_start_result | failed
+ retries: 3
+ delay: 30
+
+- set_fact:
+ ovs_service_status_changed: "{{ ovs_start_result | changed }}"
+
+- file:
+ dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}"
+ state: directory
+ when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args
+
+# TODO: add the validate parameter when there is a validation command to run
+- name: Create the Node config
+ template:
+ dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
+ src: node.yaml.v1.j2
+ backup: true
+ owner: root
+ group: root
+ mode: 0600
+ notify:
+ - restart node
+
+- name: Configure Node Environment Variables
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ regexp: "^{{ item.key }}="
+ line: "{{ item.key }}={{ item.value }}"
+ create: true
+ with_dict: "{{ openshift.node.env_vars | default({}) }}"
+ notify:
+ - restart node
+
+# Necessary because when you're on a node that's also a master the master will be
+# restarted after the node restarts docker and it will take up to 60 seconds for
+# systemd to start the master again
+- when: openshift.common.is_containerized | bool
+ block:
+ - name: Wait for master API to become available before proceeding
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2 --cacert {{ openshift.common.config_base }}/node/ca.crt
+ {{ openshift_node_master_api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+
+ - name: Start and enable node dep
+ systemd:
+ daemon_reload: yes
+ name: "{{ openshift.common.service_type }}-node-dep"
+ enabled: yes
+ state: started
+
+- name: Start and enable node
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: node_start_result
+ until: not node_start_result | failed
+ retries: 1
+ delay: 30
+ ignore_errors: true
+
+- name: Dump logs from node service if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+ when: node_start_result | failed
+
+- name: Abort if node failed to start
+ fail:
+ msg: Node failed to start please inspect the logs and try again
+ when: node_start_result | failed
+
+- set_fact:
+ node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
new file mode 100644
index 000000000..9bf4ed879
--- /dev/null
+++ b/roles/openshift_node/tasks/install.yml
@@ -0,0 +1,33 @@
+---
+# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
+# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
+- when: not openshift.common.is_containerized | bool
+ block:
+ - name: Install Node package
+ package:
+ name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
+
+ - name: Install sdn-ovs package
+ package:
+ name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
+ when:
+ - openshift.common.use_openshift_sdn | default(true) | bool
+
+ - name: Install conntrack-tools package
+ package:
+ name: "conntrack-tools"
+ state: present
+
+- when:
+ - openshift.common.is_containerized | bool
+ - not openshift.common.is_node_system_container | bool
+ block:
+ - name: Pre-pull node image when containerized
+ command: >
+ docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+
+ - include: config/install-node-docker-service-file.yml
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 81456eac9..60a25dcc6 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,43 +1,15 @@
---
-# TODO: allow for overriding default ports where possible
- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
when:
- (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
- - not openshift_docker_use_crio | default(false)
+ - not openshift_use_crio | default(false)
- name: setup firewall
include: firewall.yml
static: yes
-- name: Set node facts
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- # Reset node labels to an empty dictionary.
- - role: node
- local_facts:
- labels: {}
- - role: node
- local_facts:
- annotations: "{{ openshift_node_annotations | default(none) }}"
- debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
- iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
- kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
- labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
- registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
- set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
- node_image: "{{ osn_image | default(None) }}"
- ovs_image: "{{ osn_ovs_image | default(None) }}"
- proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
- local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
- dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
- env_vars: "{{ openshift_node_env_vars | default(None) }}"
-
+#### Disable SWAP #####
# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
- name: Check for swap usage
command: grep "^[^#].*swap" /etc/fstab
@@ -46,9 +18,10 @@
failed_when: false
register: swap_result
-# Disable Swap Block
-- block:
-
+- when:
+ - swap_result.stdout_lines | length > 0
+ - openshift_disable_swap | default(true) | bool
+ block:
- name: Disable swap
command: swapoff --all
@@ -64,45 +37,17 @@
dest: /etc/fstab
line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
state: present
+#### End Disable Swap Block ####
- when:
- - swap_result.stdout_lines | length > 0
- - openshift_disable_swap | default(true) | bool
-# End Disable Swap Block
-
-- name: Install Node package
- package:
- name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when: not openshift.common.is_containerized | bool
-
-- name: setup tuned
- include: tuned.yml
- static: yes
-
-- name: Install sdn-ovs package
- package:
- name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when:
- - openshift.common.use_openshift_sdn | default(true) | bool
- - not openshift.common.is_containerized | bool
+- name: include node installer
+ include: install.yml
- name: Restart cri-o
systemd:
name: cri-o
enabled: yes
state: restarted
- when: openshift_docker_use_crio | default(false)
-
-- name: Install conntrack-tools package
- package:
- name: "conntrack-tools"
- state: present
- when: not openshift.common.is_containerized | bool
-
-- name: Install the systemd units
- include: systemd_units.yml
+ when: openshift_use_crio | default(false)
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
@@ -116,37 +61,26 @@
notify:
- reload sysctl.conf
-- name: Start and enable openvswitch service
- systemd:
- name: openvswitch.service
- enabled: yes
- state: started
- daemon_reload: yes
+- name: include bootstrap node config
+ include: bootstrap.yml
+ when: openshift_node_bootstrap
+
+- name: include standard node config
+ include: config.yml
+ when: not openshift_node_bootstrap
+
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{oreg_auth_credentials_path }}"
when:
- - openshift.common.is_containerized | bool
- - openshift.common.use_openshift_sdn | default(true) | bool
- register: ovs_start_result
- until: not ovs_start_result | failed
- retries: 3
- delay: 30
-
-- set_fact:
- ovs_service_status_changed: "{{ ovs_start_result | changed }}"
-
-- file:
- dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}"
- state: directory
- when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args
-
-# TODO: add the validate parameter when there is a validation command to run
-- name: Create the Node config
- template:
- dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
- src: node.yaml.v1.j2
- backup: true
- owner: root
- group: root
- mode: 0600
+ - oreg_auth_user is defined
+ register: node_oreg_auth_credentials_stat
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
notify:
- restart node
@@ -166,16 +100,7 @@
notify:
- restart node
-- name: Configure Node Environment Variables
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "^{{ item.key }}="
- line: "{{ item.key }}={{ item.value }}"
- create: true
- with_dict: "{{ openshift.node.env_vars | default({}) }}"
- notify:
- - restart node
-
+#### Storage class plugins here ####
- name: NFS storage plugin configuration
include: storage_plugins/nfs.yml
tags:
@@ -193,55 +118,7 @@
include: storage_plugins/iscsi.yml
when: "'iscsi' in openshift.node.storage_plugin_deps"
-# Necessary because when you're on a node that's also a master the master will be
-# restarted after the node restarts docker and it will take up to 60 seconds for
-# systemd to start the master again
-- name: Wait for master API to become available before proceeding
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2 --cacert {{ openshift.common.config_base }}/node/ca.crt
- {{ openshift_node_master_api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
- when: openshift.common.is_containerized | bool
-
-- name: Start and enable node dep
- systemd:
- daemon_reload: yes
- name: "{{ openshift.common.service_type }}-node-dep"
- enabled: yes
- state: started
- when: openshift.common.is_containerized | bool
-
+##### END Storage #####
-- name: Start and enable node
- systemd:
- name: "{{ openshift.common.service_type }}-node"
- enabled: yes
- state: started
- daemon_reload: yes
- register: node_start_result
- until: not node_start_result | failed
- retries: 1
- delay: 30
- ignore_errors: true
-
-- name: Dump logs from node service if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
- when: node_start_result | failed
-
-- name: Abort if node failed to start
- fail:
- msg: Node failed to start please inspect the logs and try again
- when: node_start_result | failed
-
-- set_fact:
- node_service_status_changed: "{{ node_start_result | changed }}"
+- include: config/workaround-bz1331590-ovs-oom-fix.yml
+ when: openshift.common.use_openshift_sdn | default(true) | bool
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index dc1df9185..e09063aa5 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -1,6 +1,6 @@
---
- set_fact:
- l_use_crio: "{{ openshift_docker_use_crio | default(false) }}"
+ l_use_crio: "{{ openshift_use_crio | default(false) }}"
- set_fact:
l_service_name: "cri-o"
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index b86bb1549..4687400cd 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,22 +1,6 @@
---
# This file is included both in the openshift_master role and in the upgrade
# playbooks.
-
-- include: config/install-node-deps-docker-service-file.yml
- when: openshift.common.is_containerized | bool
-
-- block:
- - name: Pre-pull node image
- command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
-
- - include: config/install-node-docker-service-file.yml
- when:
- - openshift.common.is_containerized | bool
- - not openshift.common.is_node_system_container | bool
-
- name: Install Node service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
@@ -26,24 +10,24 @@
- reload systemd units
- restart node
-- include: config/install-ovs-service-env-file.yml
- when: openshift.common.is_containerized | bool
+- when: openshift.common.is_containerized | bool
+ block:
+ - name: include node deps docker service file
+ include: config/install-node-deps-docker-service-file.yml
-- name: Install Node system container
- include: node_system_container.yml
- when:
- - openshift.common.is_containerized | bool
- - openshift.common.is_node_system_container | bool
+ - name: include ovs service environment file
+ include: config/install-ovs-service-env-file.yml
-- name: Install OpenvSwitch system containers
- include: openvswitch_system_container.yml
- when:
- - openshift.common.use_openshift_sdn | default(true) | bool
- - openshift.common.is_containerized | bool
- - openshift.common.is_openvswitch_system_container | bool
+ - name: Install Node system container
+ include: node_system_container.yml
+ when:
+ - openshift.common.is_node_system_container | bool
-- include: config/workaround-bz1331590-ovs-oom-fix.yml
- when: openshift.common.use_openshift_sdn | default(true) | bool
+ - name: Install OpenvSwitch system containers
+ include: openvswitch_system_container.yml
+ when:
+ - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift.common.is_openvswitch_system_container | bool
- block:
- name: Pre-pull openvswitch image
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
index 3d0ae3bbd..0856737f6 100644
--- a/roles/openshift_node/templates/node.service.j2
+++ b/roles/openshift_node/templates/node.service.j2
@@ -8,7 +8,7 @@ Wants={{ openshift.docker.service_name }}.service
Documentation=https://github.com/openshift/origin
Requires=dnsmasq.service
After=dnsmasq.service
-{% if openshift.docker.use_crio %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
[Service]
Type=notify
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 93f8658b4..711afcadb 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -16,7 +16,7 @@ imageConfig:
latest: false
kind: NodeConfig
kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
-{% if openshift.docker.use_crio | default(False) %}
+{% if openshift_use_crio | default(False) %}
container-runtime:
- remote
container-runtime-endpoint:
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index c4580be1f..8734e7443 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -3,7 +3,7 @@ Requires={{ openshift.docker.service_name }}.service
After={{ openshift.docker.service_name }}.service
PartOf={{ openshift.common.service_type }}-node.service
Before={{ openshift.common.service_type }}-node.service
-{% if openshift.docker.use_crio %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
[Service]
ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
diff --git a/roles/openshift_node_certificates/defaults/main.yml b/roles/openshift_node_certificates/defaults/main.yml
index 70a38b844..455f26f30 100644
--- a/roles/openshift_node_certificates/defaults/main.yml
+++ b/roles/openshift_node_certificates/defaults/main.yml
@@ -1,2 +1,3 @@
---
openshift_node_cert_expire_days: 730
+openshift_ca_host: ''
diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml
index d0221a94b..9bbaafc29 100644
--- a/roles/openshift_node_dnsmasq/tasks/main.yml
+++ b/roles/openshift_node_dnsmasq/tasks/main.yml
@@ -14,6 +14,17 @@
package: name=dnsmasq state=installed
when: not openshift.common.is_atomic | bool
+- name: ensure origin/node directory exists
+ file:
+ state: directory
+ path: "{{ item }}"
+ owner: root
+ group: root
+ mode: '0700'
+ with_items:
+ - /etc/origin
+ - /etc/origin/node
+
# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed
# when the node stops. A dbus-message is sent to dnsmasq to add the same entries
# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 7458db87e..6b3de4dba 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -6,23 +6,24 @@
- when: not ostree_booted.stat.exists
block:
+ # TODO: This needs to be removed and placed into a role
- name: Ensure libselinux-python is installed
package: name=libselinux-python state=present
- name: Create any additional repos that are defined
- template:
- src: yum_repo.j2
- dest: /etc/yum.repos.d/openshift_additional.repo
- when:
- - openshift_additional_repos | length > 0
- notify: refresh cache
-
- - name: Remove the additional repos if no longer defined
- file:
- dest: /etc/yum.repos.d/openshift_additional.repo
- state: absent
- when:
- - openshift_additional_repos | length == 0
+ yum_repository:
+ description: "{{ item.description | default(item.name) }}"
+ name: "{{ item.name | default(item.id) }}"
+ baseurl: "{{ item.baseurl }}"
+ gpgkey: "{{ item.gpgkey | default(omit)}}"
+ gpgcheck: "{{ item.gpgcheck | default(1) }}"
+ sslverify: "{{ item.sslverify | default(1) }}"
+ sslclientkey: "{{ item.sslclientkey | default(omit) }}"
+ sslclientcert: "{{ item.sslclientcert | default(omit) }}"
+ file: "{{ item.name }}"
+ enabled: "{{ item.enabled | default('no')}}"
+ with_items: "{{ openshift_additional_repos }}"
+ when: openshift_additional_repos | length > 0
notify: refresh cache
# Singleton block
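Note: additional repositories are now written with the yum_repository module instead of a single templated repo file, so each entry needs at least a name and baseurl (the task reuses item.name for the repo file name). A sketch of a matching inventory entry with illustrative URLs:

    openshift_additional_repos:
    - name: example_extras
      description: Example extra packages
      baseurl: https://repo.example.com/extras/
      gpgkey: https://repo.example.com/RPM-GPG-KEY-example
      gpgcheck: 1
      enabled: 1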
diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
deleted file mode 100644
index ef2cd6603..000000000
--- a/roles/openshift_repos/templates/yum_repo.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-{% for repo in openshift_additional_repos %}
-[{{ repo.id }}]
-name={{ repo.name | default(repo.id) }}
-baseurl={{ repo.baseurl }}
-{% set enable_repo = repo.enabled | default(1) %}
-enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
-{% set enable_gpg_check = repo.gpgcheck | default(1) %}
-gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
-{% for key, value in repo.iteritems() %}
-{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %}
-{{ key }}={{ value }}
-{% endif %}
-{% endfor %}
-{% endfor %}
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 686857d94..64f94347b 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -168,19 +168,19 @@
- "{{ mktemp.stdout }}/service_catalog_api_server.yml"
delete_after: yes
-- template:
- src: api_server_service.j2
- dest: "{{ mktemp.stdout }}/service_catalog_api_service.yml"
-
- name: Set Service Catalog API Server service
- oc_obj:
- state: present
- namespace: "kube-service-catalog"
- kind: service
+ oc_service:
name: apiserver
- files:
- - "{{ mktemp.stdout }}/service_catalog_api_service.yml"
- delete_after: yes
+ namespace: kube-service-catalog
+ state: present
+ ports:
+ - name: secure
+ port: 443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: apiserver
+ session_affinity: None
- template:
src: api_server_route.j2
@@ -216,19 +216,19 @@
- "{{ mktemp.stdout }}/controller_manager.yml"
delete_after: yes
-- template:
- src: controller_manager_service.j2
- dest: "{{ mktemp.stdout }}/controller_manager_service.yml"
-
- name: Set Controller Manager service
- oc_obj:
- state: present
- namespace: "kube-service-catalog"
- kind: service
+ oc_service:
name: controller-manager
- files:
- - "{{ mktemp.stdout }}/controller_manager_service.yml"
- delete_after: yes
+ namespace: kube-service-catalog
+ state: present
+ ports:
+ - port: 6443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: controller-manager
+ session_affinity: None
+ service_type: ClusterIP
- include: start_api_server.yml
diff --git a/roles/openshift_service_catalog/templates/api_server_service.j2 b/roles/openshift_service_catalog/templates/api_server_service.j2
deleted file mode 100644
index bae337201..000000000
--- a/roles/openshift_service_catalog/templates/api_server_service.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: apiserver
-spec:
- ports:
- - name: secure
- port: 443
- protocol: TCP
- targetPort: 6443
- selector:
- app: apiserver
- sessionAffinity: None
diff --git a/roles/openshift_service_catalog/templates/controller_manager_service.j2 b/roles/openshift_service_catalog/templates/controller_manager_service.j2
deleted file mode 100644
index 2bac645fc..000000000
--- a/roles/openshift_service_catalog/templates/controller_manager_service.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: controller-manager
-spec:
- ports:
- - port: 6443
- protocol: TCP
- targetPort: 6443
- selector:
- app: controller-manager
- sessionAffinity: None
- type: ClusterIP
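Note: the two deleted Service templates above are superseded by the oc_service calls in install.yml; ports and selector pass through unchanged, while sessionAffinity and type become the module's session_affinity and service_type parameters. A minimal sketch of the same mapping for a generic service (names and namespace are illustrative):

- oc_service:
    name: example-svc
    namespace: example-ns
    state: present
    ports:
      - port: 443
        protocol: TCP
        targetPort: 6443
    selector:
      app: example-svc
    session_affinity: None
    service_type: ClusterIP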
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index 4d9f72f01..a2a579e9d 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -1,6 +1,6 @@
---
- set_fact:
- l_use_crio: "{{ openshift_docker_use_crio | default(false) }}"
+ l_use_crio: "{{ openshift_use_crio | default(false) }}"
- name: Set containerized version to configure if openshift_image_tag specified
set_fact:
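Note: the fact now follows the variable rename from openshift_docker_use_crio to openshift_use_crio; inventories still using the old name are no longer read here and fall back to the default of false. Illustrative group_vars entry for the renamed toggle:

openshift_use_crio: true    # replaces openshift_docker_use_crio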
diff --git a/setup.py b/setup.py
index b9c34a8b8..9ce1a8a0b 100644
--- a/setup.py
+++ b/setup.py
@@ -225,8 +225,9 @@ class OpenShiftAnsibleSyntaxCheck(Command):
included_playbooks = set()
for yaml_file in find_files(
- os.path.join(os.getcwd(), 'playbooks', 'byo'),
- None, None, r'\.ya?ml$'):
+ os.path.join(os.getcwd(), 'playbooks'),
+ ['adhoc', 'tasks'],
+ None, r'\.ya?ml$'):
with open(yaml_file, 'r') as contents:
for task in yaml.safe_load(contents):
if not isinstance(task, dict):
@@ -245,19 +246,27 @@ class OpenShiftAnsibleSyntaxCheck(Command):
# Evaluate the difference between all playbooks and included playbooks
entrypoint_playbooks = sorted(playbooks.difference(included_playbooks))
print('Entry point playbook count: {}'.format(len(entrypoint_playbooks)))
- # Syntax each entry point playbook
+
for playbook in entrypoint_playbooks:
print('-' * 60)
print('Syntax checking playbook: {}'.format(playbook))
- try:
- subprocess.check_output(
- ['ansible-playbook', '-i localhost,',
- '--syntax-check', playbook]
- )
- except subprocess.CalledProcessError as cpe:
- print('{}Execution failed: {}{}'.format(
- self.FAIL, cpe, self.ENDC))
+
+ if 'common' in playbook:
+ # Error on any entry points in 'common'
+ print('{}Invalid entry point playbook. All playbooks must'
+ ' start in playbooks/byo{}'.format(self.FAIL, self.ENDC))
has_errors = True
+ else:
+ # Syntax check each entry point playbook
+ try:
+ subprocess.check_output(
+ ['ansible-playbook', '-i localhost,',
+ '--syntax-check', playbook]
+ )
+ except subprocess.CalledProcessError as cpe:
+ print('{}Execution failed: {}{}'.format(
+ self.FAIL, cpe, self.ENDC))
+ has_errors = True
if has_errors:
raise SystemExit(1)
diff --git a/tox.ini b/tox.ini
index 53a9222d8..899767833 100644
--- a/tox.ini
+++ b/tox.ini
@@ -22,6 +22,5 @@ commands =
pylint: python setup.py lint
yamllint: python setup.py yamllint
generate_validation: python setup.py generate_validation
- # TODO(rhcarvalho): check syntax of other important entrypoint playbooks
ansible_syntax: python setup.py ansible_syntax
integration: python -c 'print("run test/integration/run-tests.sh")'