Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/aws/BUILD_AMI.md | 21
-rw-r--r--  playbooks/aws/PREREQUISITES.md | 40
-rw-r--r--  playbooks/aws/README.md | 141
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 77
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 63
-rw-r--r--  playbooks/aws/openshift-cluster/prerequisites.yml | 8
-rw-r--r--  playbooks/aws/openshift-cluster/provision.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/provision_instance.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/provision_sec_group.yml | 13
-rw-r--r--  playbooks/aws/openshift-cluster/provision_ssh_keypair.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/provision_vpc.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/provisioning_vars.example.yml | 28
-rw-r--r--  playbooks/aws/openshift-cluster/seal_ami.yml | 12
-rw-r--r--  playbooks/aws/provisioning-inventory.example.ini | 25
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example | 120
-rw-r--r--  playbooks/byo/openshift-cfme/uninstall.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/byo/openshift-etcd/embedded2external.yml | 6
-rw-r--r--  playbooks/byo/openshift-management/add_container_provider.yml | 6
-rw-r--r--  playbooks/byo/openshift-management/add_many_container_providers.yml | 36
-rw-r--r--  playbooks/byo/openshift-management/config.yml (renamed from playbooks/byo/openshift-cfme/config.yml) | 4
l---------  playbooks/byo/openshift-management/roles (renamed from playbooks/common/openshift-cfme/roles) | 0
-rw-r--r--  playbooks/byo/openshift-management/uninstall.yml | 2
-rw-r--r--  playbooks/byo/openshift-master/certificates.yml | 2
-rw-r--r--  playbooks/byo/openshift-node/scaleup.yml | 2
-rw-r--r--  playbooks/common/openshift-cfme/config.yml | 44
-rw-r--r--  playbooks/common/openshift-checks/adhoc.yml | 5
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 6
-rw-r--r--  playbooks/common/openshift-checks/install.yml | 47
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 32
-rw-r--r--  playbooks/common/openshift-cluster/create_persistent_volumes.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/openshift_default_storage_class.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 25
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 56
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 2
-rw-r--r--  playbooks/common/openshift-etcd/certificates.yml | 29
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 10
-rw-r--r--  playbooks/common/openshift-etcd/embedded2external.yml | 172
-rw-r--r--  playbooks/common/openshift-etcd/master_etcd_certificates.yml | 14
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 24
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 4
-rw-r--r--  playbooks/common/openshift-etcd/server_certificates.yml | 15
-rw-r--r--  playbooks/common/openshift-glusterfs/config.yml | 16
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 17
-rw-r--r--  playbooks/common/openshift-management/add_container_provider.yml | 8
-rw-r--r--  playbooks/common/openshift-management/config.yml | 35
l---------  playbooks/common/openshift-management/filter_plugins (renamed from playbooks/common/openshift-cfme/filter_plugins) | 0
l---------  playbooks/common/openshift-management/library (renamed from playbooks/common/openshift-cfme/library) | 0
l---------  playbooks/common/openshift-management/roles | 1
-rw-r--r--  playbooks/common/openshift-management/uninstall.yml (renamed from playbooks/common/openshift-cfme/uninstall.yml) | 4
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml | 10
-rw-r--r--  playbooks/common/openshift-master/ca.yml | 8
-rw-r--r--  playbooks/common/openshift-master/config.yml | 15
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 2
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 6
-rw-r--r--  playbooks/common/openshift-node/additional_config.yml | 14
-rw-r--r--  playbooks/common/openshift-node/clean_image.yml | 10
-rw-r--r--  playbooks/common/openshift-node/config.yml | 8
-rw-r--r--  playbooks/common/openshift-node/configure_nodes.yml | 1
-rw-r--r--  playbooks/common/openshift-node/image_prep.yml | 24
90 files changed, 1094 insertions, 530 deletions
diff --git a/playbooks/aws/BUILD_AMI.md b/playbooks/aws/BUILD_AMI.md
new file mode 100644
index 000000000..468264a9a
--- /dev/null
+++ b/playbooks/aws/BUILD_AMI.md
@@ -0,0 +1,21 @@
+# Build AMI
+
+Building a custom AMI for use by cluster nodes consists of the following
+steps:
+
+1. Create an instance, using a specified ssh key.
+2. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
+3. Create the AMI.
+4. If encryption is desired:
+   - A KMS key is created with the name of $clusterid.
+   - An encrypted AMI is produced with the $clusterid KMS key.
+5. Terminate the instance used to configure the AMI.
+
+More AMI-specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI, set `openshift_aws_ami_encrypt`:
+```
+# openshift_aws_ami_encrypt: True # defaults to false
+```
+
+**Note**: When enabled, this takes the most recently created AMI and produces an encrypted copy for later use. If encryption is not desired, leave the value false (the default). The AMI id will be fetched and used according to its most recent creation date.
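
For illustration, a minimal `provisioning_vars.yml` excerpt that enables AMI encryption might look like this; the clusterid and region values are hypothetical, and the KMS key takes its name from the clusterid as described above:

```yaml
# Illustrative provisioning_vars.yml excerpt (values are examples only).
openshift_aws_clusterid: mycluster
openshift_aws_region: us-east-1
openshift_aws_ami_encrypt: True   # defaults to false
```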
diff --git a/playbooks/aws/PREREQUISITES.md b/playbooks/aws/PREREQUISITES.md
new file mode 100644
index 000000000..4f428dcc3
--- /dev/null
+++ b/playbooks/aws/PREREQUISITES.md
@@ -0,0 +1,40 @@
+# Prerequisites
+
+When seeking to deploy a working openshift cluster using these plays, a few
+items must be in place.
+
+These are:
+
+1) A VPC
+2) A security group to build the AMI in
+3) SSH keys to log in to the instances
+
+These items can be provisioned ahead of time, or you can utilize the plays here
+to create these items.
+
+If you wish to provision these items yourself, or you already have these items
+provisioned and wish to utilize existing components, please refer to
+provisioning_vars.yml.example.
+
+If you wish to have these items created for you, continue with this document.
+
+# Running prerequisites.yml
+
+Warning: Running these plays will provision items in your AWS account (if not
+present), and you may incur billing charges. These plays are not suitable
+for the free-tier.
+
+## Step 1:
+Ensure you have specified all the necessary provisioning variables. See
+provisioning_vars.yml.example and README.md for more information.
+
+## Step 2:
+```
+$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml
+```
+
+This will create a VPC, security group, and SSH key. These plays are idempotent,
+and multiple runs should result in no additional provisioning of these components.
+
+You can also use these plays to verify that existing components will be
+utilized successfully.
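
A sketch of the provisioning variables these prerequisite plays consult, using the names documented in provisioning_vars.yml.example; all values below are illustrative:

```yaml
# Illustrative excerpt: let the plays create the VPC and security groups,
# and name the key pair that instances will use.
openshift_aws_clusterid: mycluster
openshift_aws_region: us-east-1
openshift_aws_create_vpc: true
openshift_aws_create_security_groups: true
openshift_aws_ssh_key_name: myuser_key
```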
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 816cb35b4..417fb539a 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -8,6 +8,13 @@ With recent desire for provisioning from customers and developers alike, the AWS
deploy highly scalable Openshift clusters utilizing AWS auto scale groups and
custom AMIs.
+To speed up the provisioning of medium and large clusters, openshift-node
+instances are created using a pre-built AMI. A list of pre-built AMIs will
+be available soon.
+
+If the deployer wishes to build their own AMI for provisioning, instructions
+to do so are provided here.
+
### Where do I start?
Before any provisioning may occur, AWS account credentials must be present in the environment. This can be done in two ways:
@@ -31,8 +38,13 @@ Before any provisioning may occur, AWS account credentials must be present in th
### Let's Provision!
-The newly added playbooks are the following:
-- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid AMI with access to repositories that contain openshift repositories.
+Warning: Running these plays will provision items in your AWS account (if not
+present), and you may incur billing charges. These plays are not suitable
+for the free-tier.
+
+#### High-level overview
+- prerequisites.yml - Provision VPC, Security Groups, SSH keys, if needed. See PREREQUISITES.md for more information.
+- build_ami.yml - Builds a custom AMI. See BUILD_AMI.md for more information.
- provision.yml - Create a vpc, elbs, security groups, launch config, asg's, etc.
- install.yml - Calls the openshift-ansible installer on the newly created instances
- provision_nodes.yml - Creates the infra and compute node scale groups
@@ -41,82 +53,39 @@ The newly added playbooks are the following:
The current expected workflow should be to provide an AMI with access to Openshift repositories. There should be a repository specified in the `openshift_additional_repos` parameter of the inventory file. The next expectation is a minimal set of values in the `provisioning_vars.yml` file to configure the desired settings for cluster instances. These settings are AWS specific and should be tailored to the consumer's AWS custom account settings.
+Values specified in provisioning_vars.yml may instead be specified in your inventory group_vars
+under the appropriate groups. Most variables can exist in the 'all' group.
+
```yaml
---
-# when creating an AMI set this to True
-# when installing a cluster set this to False
-openshift_node_bootstrap: True
-
-# specify a clusterid
-# openshift_aws_clusterid: default
-
-# specify a region
-# openshift_aws_region: us-east-1
-
-# must specify a base_ami when building an AMI
-# openshift_aws_base_ami: # base image for AMI to build from
-# specify when using a custom AMI
-# openshift_aws_ami:
-
-# when creating an encrypted AMI please specify use_encryption
-# openshift_aws_ami_encrypt: False
-
-# custom certificates are required for the ELB
-# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt'
-# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key'
-# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt'
-
-# This is required for any ec2 instances
-# openshift_aws_ssh_key_name: myuser_key
-
-# This will ensure these users are created
-#openshift_aws_users:
-#- key_name: myuser_key
-# username: myuser
-# pub_key: |
-# ssh-rsa AAAA
+# Minimum mandatory provisioning variables. See provisioning_vars.yml.example.
+# for more information.
+openshift_deployment_type: # 'origin' or 'openshift-enterprise'
+openshift_release: # example: v3.7
+openshift_pkg_version: # example: -3.7.0
+openshift_aws_ssh_key_name: # example: myuser_key
+openshift_aws_base_ami: # example: ami-12345678
+# These are required when doing SSL on the ELBs
+openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt'
+openshift_aws_iam_cert_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key'
```
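
As a concrete sketch, a filled-in version of the mandatory variables above might read as follows; every value is illustrative and should be replaced with your own key, AMI, and certificate paths:

```yaml
# Illustrative values only.
openshift_deployment_type: origin
openshift_release: v3.7
openshift_pkg_version: -3.7.0
openshift_aws_ssh_key_name: myuser_key
openshift_aws_base_ami: ami-12345678
openshift_aws_iam_cert_path: /path/to/wildcard.mycluster.example.com.crt
openshift_aws_iam_cert_key_path: /path/to/wildcard.mycluster.example.com.key
```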
If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`.
-In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
-
-```ini
-[OSEv3:children]
-masters
-nodes
-etcd
-
-[OSEv3:vars]
-################################################################################
-# Ensure these variables are set for bootstrap
-################################################################################
-# openshift_deployment_type is required for installation
-openshift_deployment_type=origin
+In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as when deploying a cluster
+using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example.
-# required when building an AMI. This will
-# be dependent on the version provided by the yum repository
-openshift_pkg_version=-3.6.0
-
-openshift_master_bootstrap_enabled=True
-
-openshift_hosted_router_wait=False
-openshift_hosted_registry_wait=False
-
-# Repository for installation
-openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}]
-
-################################################################################
-# cluster specific settings maybe be placed here
+There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
-[masters]
+#### Step 0 (optional)
-[etcd]
+You may provision a VPC, Security Group, and SSH keypair to build the AMI.
-[nodes]
+```
+$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml
```
-There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+See PREREQUISITES.md for more information.
#### Step 1
@@ -126,24 +95,6 @@ Once the `inventory` and the `provisioning_vars.yml` file has been updated with
$ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml
```
-1. This script will build a VPC. Default name will be clusterid if not specified.
-2. Create an ssh key required for the instance.
-3. Create a security group.
-4. Create an instance using the key from step 2 or a specified key.
-5. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
-6. Create the AMI.
-7. If encryption is desired
- - A KMS key is created with the name of $clusterid
- - An encrypted AMI will be produced with $clusterid KMS key
-8. Terminate the instance used to configure the AMI.
-
-More AMI specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption:
-```
-# openshift_aws_ami_encrypt: True # defaults to false
-```
-
-**Note**: This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false (defaults to false). The AMI id will be fetched and used according to its most recent creation date.
-
#### Step 2
Now that we have created an AMI for our Openshift installation, there are two ways to use the AMI.
@@ -167,16 +118,14 @@ $ ansible-playbook provision.yml -e @provisioning_vars.yml
```
This playbook runs through the following steps:
-1. Ensures a VPC is created.
-2. Ensures a SSH key exists.
-3. Creates an s3 bucket for the registry named $clusterid-docker-registry
-4. Create master security groups.
-5. Create a master launch config.
-6. Create the master auto scaling groups.
-7. If certificates are desired for ELB, they will be uploaded.
-8. Create internal and external master ELBs.
-9. Add newly created masters to the correct groups.
-10. Set a couple of important facts for the masters.
+1. Creates an s3 bucket for the registry named $clusterid-docker-registry.
+2. Creates master security groups.
+3. Creates a master launch config.
+4. Creates the master auto scaling groups.
+5. Uploads certificates for the ELBs, if desired.
+6. Creates internal and external master ELBs.
+7. Adds newly created masters to the correct groups.
+8. Sets a couple of important facts for the masters.
At this point we have successfully created the infrastructure including the master nodes.
@@ -195,13 +144,13 @@ Once this playbook completes, the cluster masters should be installed and config
#### Step 5
-Now that we have a cluster deployed it will be more interesting to create some node types. This can be done easily with the following playbook:
+Now that we have the cluster masters deployed, we need to deploy our infrastructure and compute nodes:
```
$ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml
```
-Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator.
+Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator in Step 6.
#### Step 6
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index ffc367f9f..c2c8bea50 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -42,12 +42,12 @@
until: "'instances' in instancesout and instancesout.instances|length > 0"
- debug:
- msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}"
+ msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
- name: approve nodes
oc_adm_csr:
#approve_all: True
- nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}"
- timeout: 0
+ nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
+ timeout: 60
register: nodeout
delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index 1e54f0467..fae30eb0a 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -17,71 +17,24 @@
- name: openshift_aws_region
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- - name: create an instance and prepare for ami
- include_role:
- name: openshift_aws
- tasks_from: build_ami.yml
- vars:
- openshift_aws_node_group_type: compute
-
- - name: fetch newly created instances
- ec2_remote_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}"
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
-
- - name: wait for ssh to become available
- wait_for:
- port: 22
- host: "{{ instancesout.instances[0].public_ip_address }}"
- timeout: 300
- search_regex: OpenSSH
-
- - name: add host to nodes
- add_host:
- groups: nodes
- name: "{{ instancesout.instances[0].public_dns_name }}"
+- include: provision_instance.yml
+ vars:
+ openshift_aws_node_group_type: compute
- hosts: nodes
gather_facts: False
tasks:
- name: set the user to perform installation
set_fact:
- ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default('root') }}"
-
-- name: normalize groups
- include: ../../byo/openshift-cluster/initialize_groups.yml
-
-- name: run the std_include
- include: ../../common/openshift-cluster/evaluate_groups.yml
-
-- name: run the std_include
- include: ../../common/openshift-cluster/initialize_facts.yml
-
-- name: run the std_include
- include: ../../common/openshift-cluster/initialize_openshift_repos.yml
-
-- name: run node config setup
- include: ../../common/openshift-node/setup.yml
-
-- name: run node config
- include: ../../common/openshift-node/configure_nodes.yml
-
-- name: Re-enable excluders
- include: ../../common/openshift-node/enable_excluders.yml
-
-- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: seal the ami
- include_role:
- name: openshift_aws
- tasks_from: seal_ami.yml
- vars:
- openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
+ ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default(ansible_ssh_user) }}"
+ openshift_node_bootstrap: True
+ openshift_node_image_prep_packages:
+ - cloud-utils-growpart
+
+# This is the part that installs all of the software and configs for the instance
+# to become a node.
+- include: ../../common/openshift-node/image_prep.yml
+
+- include: seal_ami.yml
+ vars:
+ openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
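
The play above seeds `openshift_node_image_prep_packages` with cloud-utils-growpart before running image_prep.yml. If additional packages should be baked into the AMI, a sketch of an override in `provisioning_vars.yml` follows; the extra package name is an example only, and this assumes extra-vars precedence lets `-e @provisioning_vars.yml` win over the play's set_fact:

```yaml
# Hypothetical override; cloud-utils-growpart mirrors the default set above.
openshift_node_image_prep_packages:
- cloud-utils-growpart
- iptables-services
```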
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index 86d58a68e..4d0bf9531 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -1,68 +1,19 @@
---
-- name: Setup the vpc and the master node group
+- name: Setup the master node group
hosts: localhost
tasks:
- - name: Alert user to variables needed - clusterid
- debug:
- msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
-
- - name: Alert user to variables needed - region
- debug:
- msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-
- - name: fetch newly created instances
- ec2_remote_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
-
- - name: add new master to masters group
- add_host:
- groups: "masters,etcd,nodes"
- name: "{{ item.public_ip_address }}"
- hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}"
- with_items: "{{ instancesout.instances }}"
-
- - name: wait for ssh to become available
- wait_for:
- port: 22
- host: "{{ item.public_ip_address }}"
- timeout: 300
- search_regex: OpenSSH
- with_items: "{{ instancesout.instances }}"
+ - include_role:
+ name: openshift_aws
+ tasks_from: setup_master_group.yml
- name: set the master facts for hostname to elb
hosts: masters
gather_facts: no
remote_user: root
tasks:
- - name: fetch elbs
- ec2_elb_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- names:
- - "{{ item }}"
- with_items:
- - "{{ openshift_aws_clusterid | default('default') }}-master-external"
- - "{{ openshift_aws_clusterid | default('default') }}-master-internal"
- delegate_to: localhost
- register: elbs
-
- - debug: var=elbs
-
- - name: set fact
- set_fact:
- openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
- osm_custom_cors_origins:
- - "{{ elbs.results[1].elbs[0].dns_name }}"
- - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
- - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
- with_items: "{{ groups['masters'] }}"
+ - include_role:
+ name: openshift_aws
+ tasks_from: master_facts.yml
- name: normalize groups
include: ../../byo/openshift-cluster/initialize_groups.yml
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
new file mode 100644
index 000000000..df77fe3bc
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -0,0 +1,8 @@
+---
+- include: provision_vpc.yml
+
+- include: provision_ssh_keypair.yml
+
+- include: provision_sec_group.yml
+ vars:
+ openshift_aws_node_group_type: compute
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index 8f018abd0..4b5bd22ea 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -1,5 +1,5 @@
---
-- name: Setup the vpc and the master node group
+- name: Setup the elb and the master node group
hosts: localhost
tasks:
diff --git a/playbooks/aws/openshift-cluster/provision_instance.yml b/playbooks/aws/openshift-cluster/provision_instance.yml
new file mode 100644
index 000000000..6e843453c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_instance.yml
@@ -0,0 +1,12 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_node_group_type' is set correctly for your usage.
+# See build_ami.yml for an example.
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create an instance and prepare for ami
+ include_role:
+ name: openshift_aws
+ tasks_from: provision_instance.yml
diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml
new file mode 100644
index 000000000..039357adb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml
@@ -0,0 +1,13 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_node_group_type' is set correctly for your usage.
+# See build_ami.yml for an example.
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create a security group
+ include_role:
+ name: openshift_aws
+ tasks_from: security_group.yml
+ when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
new file mode 100644
index 000000000..3ec683958
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
@@ -0,0 +1,12 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create ssh keypairs
+ include_role:
+ name: openshift_aws
+ tasks_from: ssh_keys.yml
+ vars:
+ openshift_aws_node_group_type: compute
+ when: openshift_aws_users | default([]) | length > 0
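
This play only runs when `openshift_aws_users` is non-empty. A minimal sketch of that variable, mirroring the commented example in provisioning_vars.yml.example (key material truncated):

```yaml
# Hypothetical user/key definition consumed by ssh_keys.yml.
openshift_aws_users:
- key_name: myuser_key
  username: myuser
  pub_key: |
    ssh-rsa AAAA...
```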
diff --git a/playbooks/aws/openshift-cluster/provision_vpc.yml b/playbooks/aws/openshift-cluster/provision_vpc.yml
new file mode 100644
index 000000000..0a23a6d32
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_vpc.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create a vpc
+ include_role:
+ name: openshift_aws
+ tasks_from: vpc.yml
+ when: openshift_aws_create_vpc | default(True) | bool
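
Conversely, to reuse a pre-existing VPC, the creation flag can be disabled and the existing names supplied. A sketch based on the variables documented in provisioning_vars.yml.example; the name values are illustrative:

```yaml
# Illustrative: skip VPC creation and point the plays at existing resources.
openshift_aws_create_vpc: false
openshift_aws_vpc_name: mycluster
openshift_aws_subnet_name: mycluster-subnet
```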
diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
deleted file mode 100644
index 28eb9c993..000000000
--- a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# when creating an AMI set this option to True
-# when installing the cluster, set this to False
-openshift_node_bootstrap: True
-
-# specify a clusterid
-#openshift_aws_clusterid: default
-
-# must specify a base_ami when building an AMI
-#openshift_aws_base_ami:
-
-# when creating an encrypted AMI please specify use_encryption
-#openshift_aws_ami_encrypt: False
-
-# custom certificates are required for the ELB
-#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
-#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key'
-#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
-
-# This is required for any ec2 instances
-#openshift_aws_ssh_key_name: myuser_key
-
-# This will ensure these users are created
-#openshift_aws_users:
-#- key_name: myuser_key
-# username: myuser
-# pub_key: |
-# ssh-rsa AAAA
diff --git a/playbooks/aws/openshift-cluster/seal_ami.yml b/playbooks/aws/openshift-cluster/seal_ami.yml
new file mode 100644
index 000000000..8239a64fb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/seal_ami.yml
@@ -0,0 +1,12 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_ami_name' is set correctly for your usage.
+# See build_ami.yml for an example.
+- hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: seal the ami
+ include_role:
+ name: openshift_aws
+ tasks_from: seal_ami.yml
diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini
new file mode 100644
index 000000000..238a7eb2f
--- /dev/null
+++ b/playbooks/aws/provisioning-inventory.example.ini
@@ -0,0 +1,25 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+################################################################################
+# Ensure these variables are set for bootstrap
+################################################################################
+# openshift_deployment_type is required for installation
+openshift_deployment_type=origin
+
+openshift_master_bootstrap_enabled=True
+
+openshift_hosted_router_wait=False
+openshift_hosted_registry_wait=False
+
+################################################################################
+# cluster specific settings may be placed here
+
+[masters]
+
+[etcd]
+
+[nodes]
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
new file mode 100644
index 000000000..1491fb868
--- /dev/null
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -0,0 +1,120 @@
+---
+# Variables that are commented in this file are optional; uncommented variables
+# are mandatory.
+
+# Default values for each variable are provided, as applicable.
+# Example values for mandatory variables are provided as a comment at the end
+# of the line.
+
+# ------------------------ #
+# Common/Cluster Variables #
+# ------------------------ #
+# Variables in this section affect all areas of the cluster
+
+# Deployment type must be specified.
+openshift_deployment_type: # 'origin' or 'openshift-enterprise'
+
+# openshift_release must be specified. Use whatever version of openshift
+# that is supported by openshift-ansible that you wish.
+openshift_release: # v3.7
+
+# This will be dependent on the version provided by the yum repository
+openshift_pkg_version: # -3.7.0
+
+# specify a clusterid
+# This value is also used as the default value for many other components.
+#openshift_aws_clusterid: default
+
+# AWS region
+# This value will instruct the plays where all items should be created.
+# Multi-region deployments are not supported using these plays at this time.
+#openshift_aws_region: us-east-1
+
+#openshift_aws_create_launch_config: true
+#openshift_aws_create_scale_group: true
+
+# --- #
+# VPC #
+# --- #
+
+# openshift_aws_create_vpc defaults to true. If you don't wish to provision
+# a vpc, set this to false.
+#openshift_aws_create_vpc: true
+
+# Name of the vpc. Needs to be set if using a pre-existing vpc.
+#openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+
+# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
+# vpc + subnet.
+#openshift_aws_subnet_name:
+
+# -------------- #
+# Security Group #
+# -------------- #
+
+# openshift_aws_create_security_groups defaults to true. If you wish to use
+# an existing security group, set this to false.
+#openshift_aws_create_security_groups: true
+
+# openshift_aws_build_ami_group is the name of the security group to build the
+# ami in. This defaults to the value of openshift_aws_clusterid.
+#openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
+
+# openshift_aws_launch_config_security_groups specifies the security groups to
+# apply to the launch config. The launch config security groups are what
+# the cluster is actually deployed in.
+#openshift_aws_launch_config_security_groups: see roles/openshift_aws/defaults/main.yml
+
+# openshift_aws_node_security_groups are created when
+# openshift_aws_create_security_groups is set to true.
+#openshift_aws_node_security_groups: see roles/openshift_aws/defaults/main.yml
+
+# -------- #
+# ssh keys #
+# -------- #
+
+# Specify the key pair name here to connect to the provisioned instances. This
+# can be an existing key, or it can be one of the keys specified in
+# openshift_aws_users
+openshift_aws_ssh_key_name: # myuser_key
+
+# This will ensure these users and their public keys are created.
+#openshift_aws_users:
+#- key_name: myuser_key
+# username: myuser
+# pub_key: |
+# ssh-rsa AAAA
+
+# When building the AMI, specify the user to ssh to the instance as.
+# openshift_aws_build_ami_ssh_user: root
+
+# --------- #
+# AMI Build #
+# --------- #
+# Variables in this section apply to building a node AMI for use in your
+# openshift cluster.
+
+# must specify a base_ami when building an AMI
+openshift_aws_base_ami: # ami-12345678
+
+# when creating an encrypted AMI please specify use_encryption
+#openshift_aws_ami_encrypt: False
+
+# -- #
+# S3 #
+# -- #
+
+# Create an s3 bucket.
+#openshift_aws_create_s3: True
+
+# --- #
+# ELB #
+# --- #
+
+# openshift_aws_elb_name will be the base-name of the ELBs.
+#openshift_aws_elb_name: "{{ openshift_aws_clusterid }}"
+
+# custom certificates are required for the ELB
+openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt'
+openshift_aws_iam_cert_key_path: # '/path/to/wildcard.<clusterid>.example.com.key'
+openshift_aws_iam_cert_chain_path: # '/path/to/cert.ca.crt'
diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml
deleted file mode 100644
index c8ed16859..000000000
--- a/playbooks/byo/openshift-cfme/uninstall.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# - include: ../openshift-cluster/initialize_groups.yml
-# tags:
-# - always
-
-- include: ../../common/openshift-cfme/uninstall.yml
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 60fa44c5b..f2e52782b 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -8,5 +8,3 @@
- always
- include: ../../common/openshift-cluster/config.yml
- vars:
- openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-etcd/embedded2external.yml b/playbooks/byo/openshift-etcd/embedded2external.yml
new file mode 100644
index 000000000..6690a7624
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/embedded2external.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-etcd/embedded2external.yml
diff --git a/playbooks/byo/openshift-management/add_container_provider.yml b/playbooks/byo/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..3378b5abd
--- /dev/null
+++ b/playbooks/byo/openshift-management/add_container_provider.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- include: ../../common/openshift-management/add_container_provider.yml
diff --git a/playbooks/byo/openshift-management/add_many_container_providers.yml b/playbooks/byo/openshift-management/add_many_container_providers.yml
new file mode 100644
index 000000000..62fdb11c5
--- /dev/null
+++ b/playbooks/byo/openshift-management/add_many_container_providers.yml
@@ -0,0 +1,36 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Ensure the container provider configuration is defined
+ assert:
+ that: container_providers_config is defined
+ msg: |
+ Error: Must provide providers config path. Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command
+
+ - name: Include providers/management configuration
+ include_vars:
+ file: "{{ container_providers_config }}"
+
+ - name: Ensure this cluster is a container provider
+ uri:
+ url: "https://{{ management_server['hostname'] }}/api/providers"
+ body_format: json
+ method: POST
+ user: "{{ management_server['user'] }}"
+ password: "{{ management_server['password'] }}"
+ validate_certs: no
+ # Docs on formatting the BODY of the POST request:
+ # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+ body: "{{ item }}"
+ failed_when: false
+ with_items: "{{ container_providers }}"
+ register: results
+
+ # Include openshift_management for access to filter_plugins.
+ - include_role:
+ name: openshift_management
+ tasks_from: noop
+
+ - name: print each result
+ debug:
+ msg: "{{ results.results | oo_filter_container_providers }}"
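
The playbook expects `container_providers_config` to point at a vars file defining `management_server` and `container_providers`, the two keys it reads above. A minimal sketch follows; all values are hypothetical, and the exact provider body format is specified by the ManageIQ API reference linked in the task comments:

```yaml
# Hypothetical container providers configuration file.
management_server:
  hostname: management.example.com
  user: admin
  password: changeme
container_providers:
- name: mycluster
  type: ManageIQ::Providers::Openshift::ContainerManager
  # remaining connection/authentication fields per the ManageIQ providers API
```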
diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-management/config.yml
index 0e8e7a94d..e8795ef85 100644
--- a/playbooks/byo/openshift-cfme/config.yml
+++ b/playbooks/byo/openshift-management/config.yml
@@ -1,8 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/evaluate_groups.yml
-- include: ../../common/openshift-cfme/config.yml
+- include: ../../common/openshift-management/config.yml
diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/byo/openshift-management/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/common/openshift-cfme/roles
+++ b/playbooks/byo/openshift-management/roles
diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml
new file mode 100644
index 000000000..e95c1c88a
--- /dev/null
+++ b/playbooks/byo/openshift-management/uninstall.yml
@@ -0,0 +1,2 @@
+---
+- include: ../../common/openshift-management/uninstall.yml
diff --git a/playbooks/byo/openshift-master/certificates.yml b/playbooks/byo/openshift-master/certificates.yml
index 26b964034..e147dcba1 100644
--- a/playbooks/byo/openshift-master/certificates.yml
+++ b/playbooks/byo/openshift-master/certificates.yml
@@ -3,6 +3,4 @@
- include: ../../common/openshift-cluster/std_include.yml
-- include: ../../common/openshift-master/ca.yml
-
- include: ../../common/openshift-master/certificates.yml
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index 9f992cca6..e0c36fb69 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -16,6 +16,4 @@
- include: ../../common/openshift-cluster/std_include.yml
-- include: ../../common/openshift-node/certificates.yml
-
- include: ../../common/openshift-node/config.yml
diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml
deleted file mode 100644
index 533a35d9e..000000000
--- a/playbooks/common/openshift-cfme/config.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-# TODO: Make this work. The 'name' variable below is undefined
-# presently because it's part of the cfme role. This play can't run
-# until that's re-worked.
-#
-# - name: Pre-Pull manageiq-pods docker images
-# hosts: nodes
-# tasks:
-# - name: Ensure the latest manageiq-pods docker image is pulling
-# docker_image:
-# name: "{{ openshift_cfme_container_image }}"
-# # Fire-and-forget method, never timeout
-# async: 99999999999
-# # F-a-f, never check on this. True 'background' task.
-# poll: 0
-
-- name: Configure Masters for CFME Bulk Image Imports
- hosts: oo_masters_to_config
- serial: 1
- tasks:
- - name: Run master cfme tuning playbook
- include_role:
- name: openshift_cfme
- tasks_from: tune_masters
-
-- name: Setup CFME
- hosts: oo_first_master
- vars:
- r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}"
- pre_tasks:
- - name: Create a temporary place to evaluate the PV templates
- command: mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: r_openshift_cfme_mktemp
- changed_when: false
- - name: Ensure the server template was read from disk
- debug:
- msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}"
-
- tasks:
- - name: Run the CFME Setup Role
- include_role:
- name: openshift_cfme
- vars:
- template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}"
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml
index dfcef8435..d0deaeb65 100644
--- a/playbooks/common/openshift-checks/adhoc.yml
+++ b/playbooks/common/openshift-checks/adhoc.yml
@@ -1,12 +1,13 @@
---
-- name: OpenShift health checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: adhoc
post_tasks:
- - name: Run health checks
+ - name: Run health checks (adhoc)
action: openshift_health_check
args:
checks: '{{ openshift_checks | default([]) }}'
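
The adhoc play takes its list of checks from the `openshift_checks` variable, falling back to an empty list. A sketch of selecting checks via group_vars or `-e`, using check names that appear in install.yml below:

```yaml
# Illustrative selection of checks for adhoc.yml.
openshift_checks:
- disk_availability
- memory_availability
- docker_storage
```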
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 21ea785ef..d0921b9d3 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,11 +1,13 @@
---
-- name: Run OpenShift health checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: health
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (@health)
+ action: openshift_health_check
args:
checks: ['@health']
diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/common/openshift-checks/install.yml
new file mode 100644
index 000000000..6701a2e15
--- /dev/null
+++ b/playbooks/common/openshift-checks/install.yml
@@ -0,0 +1,47 @@
+---
+- name: Health Check Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'In Progress'
+ set_stats:
+ data:
+ installer_phase_health: "In Progress"
+ aggregate: false
+
+- name: OpenShift Health Checks
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: install
+ post_tasks:
+ - name: Run health checks (install) - EL
+ when: ansible_distribution != "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
+ - name: Run health checks (install) - Fedora
+ when: ansible_distribution == "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - docker_image_availability
+
+- name: Health Check Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'Complete'
+ set_stats:
+ data:
+ installer_phase_health: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index 88e6f9120..32449d4e4 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,11 +1,13 @@
---
-- name: run OpenShift pre-install checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: pre-install
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (@preflight)
+ action: openshift_health_check
args:
checks: ['@preflight']
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 4ca0d48e4..244787985 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,26 +1,5 @@
---
-# TODO: refactor this into its own include
-# and pass a variable for ctx
-- name: Verify Requirements
- hosts: oo_all_hosts
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: install
- post_tasks:
- - action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - package_availability
- - package_version
- - docker_image_availability
- - docker_storage
-
-- include: ../openshift-etcd/ca.yml
-
-- include: ../openshift-etcd/certificates.yml
+- include: ../openshift-checks/install.yml
- include: ../openshift-etcd/config.yml
@@ -30,16 +9,10 @@
- include: ../openshift-loadbalancer/config.yml
when: groups.oo_lb_to_config | default([]) | count > 0
-- include: ../openshift-master/ca.yml
-
-- include: ../openshift-master/certificates.yml
-
- include: ../openshift-master/config.yml
- include: ../openshift-master/additional_config.yml
-- include: ../openshift-node/certificates.yml
-
- include: ../openshift-node/config.yml
- include: ../openshift-glusterfs/config.yml
@@ -56,6 +29,9 @@
- include: service_catalog.yml
when: openshift_enable_service_catalog | default(false) | bool
+- include: ../openshift-management/config.yml
+ when: openshift_management_install_management | default(false) | bool
+
- name: Print deprecated variable warning message if necessary
hosts: oo_first_master
gather_facts: no
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
index 8a60a30b8..ec6f2c52c 100644
--- a/playbooks/common/openshift-cluster/create_persistent_volumes.yml
+++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
@@ -1,4 +1,13 @@
---
+- name: Create persistent volumes
+ hosts: oo_first_master
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ tasks:
+ - debug: var=persistent_volumes
+ - debug: var=persistent_volume_claims
+
- name: Create Hosted Resources - persistent volumes
hosts: oo_first_master
vars:
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index e55b2f964..78b552279 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -51,7 +51,7 @@
when:
- g_etcd_hosts | default([]) | length not in [3,1]
- not openshift_master_unsupported_embedded_etcd | default(False)
- - not openshift_node_bootstrap | default(False)
+ - not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
add_host:
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index be2f8b5f4..91223d368 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -10,6 +10,7 @@
- name: load openshift_facts module
include_role:
name: openshift_facts
+ static: yes
# TODO: Should this role be refactored into health_checks??
- name: Run openshift_sanitize_inventory to set variables
@@ -145,7 +146,19 @@
https_proxy: "{{ openshift_https_proxy | default(None) }}"
no_proxy: "{{ openshift_no_proxy | default(None) }}"
generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
- no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+
+ - name: Set fact of no_proxy_internal_hostnames
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- name: initialize_facts set_fact repoquery command
set_fact:
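
The new `no_proxy_internal_hostnames` fact above only fires when a proxy is configured and no_proxy generation is enabled, per its `when:` conditions. An illustrative inventory excerpt that would satisfy them (proxy URL is an example only):

```yaml
# Hypothetical proxy settings satisfying the conditions above.
openshift_http_proxy: http://proxy.example.com:3128
openshift_https_proxy: http://proxy.example.com:3128
openshift_generate_no_proxy_hosts: true
```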
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 6100c36e1..37a5284d5 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,15 +1,4 @@
---
-# openshift_install_base_package_group may be set in a play variable to limit
-# the host groups the base package is installed on. This is currently used
-# for master/control-plane upgrades.
-- name: Set version_install_base_package true on masters and nodes
- hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}"
- tasks:
- - name: Set version_install_base_package true
- set_fact:
- version_install_base_package: True
- when: version_install_base_package is not defined
-
# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
@@ -19,8 +8,8 @@
# NOTE: We set this even on etcd hosts as they may also later run as masters,
# and we don't want to install wrong version of docker and have to downgrade
# later.
-- name: Set openshift_version for all hosts
- hosts: oo_all_hosts:!oo_first_master
+- name: Set openshift_version for etcd, node, and master hosts
+ hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
vars:
openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
pre_tasks:
diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
index 4b4f19690..62fe0dd60 100644
--- a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
+++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
@@ -3,4 +3,4 @@
hosts: oo_first_master
roles:
- role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 32e5e708a..c1536eb36 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,7 +1,6 @@
---
- name: Hosted Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Hosted install 'In Progress'
@@ -26,8 +25,7 @@
when: openshift_hosted_prometheus_deploy | default(False) | bool
- name: Hosted Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Hosted install 'Complete'
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 69f50fbcd..529a4c939 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,7 +1,6 @@
---
- name: Logging Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Logging install 'In Progress'
@@ -24,8 +23,7 @@
tasks_from: update_master_config
- name: Logging Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Logging install 'Complete'
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index e369dcd86..9c0bd489b 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,7 +1,6 @@
---
- name: Metrics Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Metrics install 'In Progress'
@@ -25,8 +24,7 @@
tasks_from: update_master_config.yaml
- name: Metrics Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Metrics install 'Complete'
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
index ac2d250a3..a73b294a5 100644
--- a/playbooks/common/openshift-cluster/openshift_prometheus.yml
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -1,5 +1,25 @@
---
+- name: Prometheus Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Prometheus install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_prometheus: "In Progress"
+ aggregate: false
+
- name: Create Hosted Resources - openshift_prometheus
hosts: oo_first_master
roles:
- role: openshift_prometheus
+
+- name: Prometheus Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Prometheus install 'Complete'
+ set_stats:
+ data:
+ installer_phase_prometheus: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 12cd209d2..2068ed199 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -44,8 +44,8 @@
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: servingInfo.clientCA
- yaml_value: ca-bundle.crt
- when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca-bundle.crt'
+ yaml_value: ca.crt
+ when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: etcdClientInfo.ca
@@ -114,12 +114,18 @@
register: g_new_openshift_ca_mktemp
changed_when: false
-- include: ../../openshift-master/ca.yml
+- name: Create OpenShift CA
+ hosts: oo_first_master
vars:
# Set openshift_ca_config_dir to a temporary directory where CA
# will be created. We'll replace the existing CA with the CA
# created in the temporary directory.
openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}"
+ roles:
+ - role: openshift_master_facts
+ - role: openshift_named_certificates
+ - role: openshift_ca
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Create temp directory for syncing certs
hosts: localhost
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
index 95a8f601c..bd964b2ce 100644
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -1,7 +1,6 @@
---
- name: Service Catalog Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Service Catalog install 'In Progress'
@@ -20,8 +19,7 @@
first_master: "{{ groups.oo_first_master[0] }}"
- name: Service Catalog Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Service Catalog install 'Complete'
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 090ad6445..45b34c8bd 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,7 +1,6 @@
---
- name: Initialization Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
roles:
- installer_checkpoint
@@ -37,8 +36,7 @@
- always
- name: Initialization Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set install initialization 'Complete'
diff --git a/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
new file mode 100644
index 000000000..9c9c260fb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
@@ -0,0 +1,37 @@
+---
+apiVersion: v1
+kind: Role
+metadata:
+ name: shared-resource-viewer
+ namespace: openshift
+rules:
+- apiGroups:
+ - ""
+ - template.openshift.io
+ attributeRestrictions: null
+ resources:
+ - templates
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreamimages
+ - imagestreams
+ - imagestreamtags
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreams/layers
+ verbs:
+ - get
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 72de63070..fc1cbf32a 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -30,6 +30,7 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: " {{ groups['oo_nodes_to_config'] }}"
when:
+ - hostvars[item].openshift is defined
- hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 07e521a89..122066955 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -103,9 +103,16 @@
openshift_hosted_templates_import_command: replace
# Check for warnings to be printed at the end of the upgrade:
-- name: Check for warnings
+- name: Clean up and display warnings
hosts: oo_masters_to_config
- tasks:
+ tags:
+ - always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
@@ -121,12 +128,8 @@
- not grep_plugin_order_override | skipped
- grep_plugin_order_override.rc == 0
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ - name: Warn if shared-resource-viewer could not be updated
+ debug:
+ msg: "WARNING the shared-resource-viewer role could not be upgraded to 3.6 spec because it's marked protected, please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213"
+ when:
+ - __shared_resource_viewer_protected | default(false)
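Folding the excluder re-enable into the warnings play relies on Ansible's fixed ordering inside a play: roles always run before post_tasks. The excluder is therefore re-enabled first, and the warning checks still run afterwards, just as they did as separate plays. A minimal sketch of that ordering:

    - hosts: oo_masters_to_config
      gather_facts: no
      roles:
        - role: openshift_excluder              # roles run first
          r_openshift_excluder_action: enable
      post_tasks:
        - name: Warning checks would go here
          debug:                                # post_tasks run after all roles
            msg: "evaluated only after the excluder role has finished"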
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 45022cd61..6a5bc24f7 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -9,16 +9,29 @@
local_facts:
ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
+ - when: openshift.common.is_containerized | bool
+ block:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type }}-master"
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
+ # Handle the case of a non-HA to HA upgrade.
+ - name: Check if the {{ openshift.common.service_type }}-master-api.service exists
+ command: >
+ systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend
+ register: master_api_service_status
+
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ when:
+ - master_api_service_status.stdout_lines | length > 0
+ - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items: "{{ master_services }}"
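The layout detection hinges on a systemctl behavior: `systemctl list-units <unit> --no-legend` prints nothing when the unit is not loaded, so empty output means only the single combined master service exists. A sketch of the probe, assuming a service_type of origin:

    - name: Probe for the split master-api unit (assumes service_type 'origin')
      command: systemctl list-units origin-master-api.service --no-legend
      register: api_unit
      changed_when: false
    # api_unit.stdout_lines | length == 0  -> non-HA: manage origin-master only
    # first line names the unit            -> HA: manage master-api and master-controllers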
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
index ad6325ca0..2a8de50a2 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -1,12 +1,14 @@
---
-- name: Verify Host Requirements
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+ any_errors_fatal: true
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: upgrade
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
args:
checks:
- disk_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 142ce5f3d..13fa37b09 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -4,6 +4,12 @@
msg: Verify OpenShift is already installed
when: openshift.common.version is not defined
+- name: Update oreg_auth docker login credentials if necessary
+ include_role:
+ name: docker
+ tasks_from: registry_auth.yml
+ when: oreg_auth_user is defined
+
- name: Verify containers are available for upgrade
command: >
docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
@@ -37,7 +43,7 @@
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<')
+ - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
fail:
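The reworked condition accounts for openshift_pkg_version conventionally carrying a leading dash (a yum-style pin such as "-3.7.0-0.126.0", a hypothetical value here): splitting on '-' and taking index 1 extracts the bare version, and the '-0.0' default keeps the expression well-formed when the variable is unset. A worked example:

    # '-3.7.0-0.126.0'.split('-')  ->  ['', '3.7.0', '0.126.0']
    - debug:
        msg: "{{ ('-3.7.0-0.126.0').split('-')[1] | version_compare('3.7', '<') }}"
    # -> False, because 3.7.0 is not older than 3.7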
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index da47491c1..a5e2f7940 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -31,7 +31,6 @@
role: master
local_facts:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
- name: Upgrade and backup etcd
include: ./etcd/main.yml
@@ -91,6 +90,9 @@
- include_vars: ../../../../roles/openshift_master/vars/main.yml
+ - name: Update journald config
+ include: ../../../../roles/openshift_master/tasks/journald.yml
+
- name: Remove any legacy systemd units and update systemd units
include: ../../../../roles/openshift_master/tasks/systemd_units.yml
@@ -193,13 +195,14 @@
# Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
# restart.
skip_docker_role: True
+ __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version | version_compare('3.7','<')
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -214,7 +217,7 @@
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version | version_compare('3.7','<')
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -229,7 +232,52 @@
changed_when:
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
- when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool)
+ when:
+ - openshift_version | version_compare('3.7','<')
+ - openshift_version | version_compare('3.4','>=')
+
+ - when: openshift_upgrade_target | version_compare('3.7','<')
+ block:
+ - name: Retrieve shared-resource-viewer
+ oc_obj:
+ state: list
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ register: objout
+
+ - name: Determine if shared-resource-viewer is protected
+ set_fact:
+ __shared_resource_viewer_protected: true
+ when:
+ - "'results' in objout"
+ - "'results' in objout['results']"
+ - "'annotations' in objout['results']['results'][0]['metadata']"
+ - "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']"
+ - "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'"
+ - copy:
+ src: "{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - "{{ __master_shared_resource_viewer_file }}"
+ when: __shared_resource_viewer_protected is not defined
+
+ - name: Fixup shared-resource-viewer role
+ oc_obj:
+ state: present
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ files:
+ - "/tmp/{{ __master_shared_resource_viewer_file }}"
+ delete_after: true
+ when: __shared_resource_viewer_protected is not defined
+ register: result
+ retries: 3
+ delay: 5
+ until: result.rc == 0
+ ignore_errors: true
+
- name: Reconcile Security Context Constraints
command: >
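The protection test above walks the registered oc_obj output down to the openshift.io/reconcile-protect annotation. The same check can be made by hand; a sketch as a task, assuming oc and a cluster-admin kubeconfig are available:

    - name: Inspect the shared-resource-viewer role manually
      command: oc get role shared-resource-viewer -n openshift -o yaml
      register: srv
      changed_when: false
    # The role is protected, and the fixup task above skips it, when
    # metadata.annotations contains: openshift.io/reconcile-protect: "true"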
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index d69472fad..5e7a66171 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -41,12 +41,12 @@
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.certFile'
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
yaml_value: service-signer.key
- modify_yaml:
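The rename matters because modify_yaml writes exactly the key it is given, so the earlier misspelling (servicesServingCert) created a stray key and left the real setting untouched. After the corrected tasks run, master-config.yaml should carry this fragment:

    controllerConfig:
      serviceServingCert:
        signer:
          certFile: service-signer.crt
          keyFile: service-signer.key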
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index f64f0e003..54c85f0fb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -68,7 +68,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
index ed89dbe8d..52458e03c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -1,16 +1,10 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 43da5b629..d7cb38d03 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -68,7 +68,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
index ed89dbe8d..52458e03c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
@@ -1,16 +1,10 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index 30e719d8f..bda245fe1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -112,6 +112,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_5/master_config_upgrade.yml"
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index e9cec9220..6cdea7b84 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -72,7 +72,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
index ed89dbe8d..db0c8f886 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -1,16 +1,15 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 920dc2ffc..dd109cfa9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -116,6 +116,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 27d8515dc..8ab68002d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -76,7 +76,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
index ed89dbe8d..1d4d1919c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
@@ -1,16 +1,20 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.election.lockName'
+ yaml_value: 'openshift-master-controllers'
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index f1ca1edb9..f4862e321 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -120,6 +120,22 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
- include: ../upgrade_nodes.yml
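The v3_7 master_config_upgrade.yml hook above sets controllerConfig.election.lockName, which presumably switches the controllers to lock-based leader election; restarting them one at a time would briefly mix old and new election modes, hence the stop-all-then-start-all cycle. The resulting master-config.yaml fragment:

    controllerConfig:
      election:
        lockName: openshift-master-controllers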
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 6c4f9671b..d5a8379d7 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -80,7 +80,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
@@ -128,4 +127,18 @@
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_etcd_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index f76fc68d1..8e4f99c91 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -15,7 +15,7 @@
- name: Confirm OpenShift authorization objects are in sync
command: >
{{ openshift.common.client_binary }} adm migrate authorization
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version | version_compare('3.7','<')
changed_when: false
register: l_oc_result
until: l_oc_result.rc == 0
diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml
index 31a0f50d8..eb6b94f33 100644
--- a/playbooks/common/openshift-etcd/certificates.yml
+++ b/playbooks/common/openshift-etcd/certificates.yml
@@ -1,29 +1,4 @@
---
-- name: Create etcd server certificates for etcd hosts
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_facts
- post_tasks:
- - include_role:
- name: etcd
- tasks_from: server_certificates
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+- include: server_certificates.yml
-- name: Create etcd client certificates for master hosts
- hosts: oo_masters_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_facts
- - role: openshift_etcd_client_certificates
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+- include: master_etcd_certificates.yml
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 2cae231b4..48d46bbb0 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -1,7 +1,6 @@
---
- name: etcd Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set etcd install 'In Progress'
@@ -10,6 +9,10 @@
installer_phase_etcd: "In Progress"
aggregate: false
+- include: ca.yml
+
+- include: certificates.yml
+
- name: Configure etcd
hosts: oo_etcd_to_config
any_errors_fatal: true
@@ -23,8 +26,7 @@
- role: nickhammond.logrotate
- name: etcd Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set etcd install 'Complete'
diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml
new file mode 100644
index 000000000..b16b78c4f
--- /dev/null
+++ b/playbooks/common/openshift-etcd/embedded2external.yml
@@ -0,0 +1,172 @@
+---
+- name: Pre-migrate checks
+ hosts: localhost
+ tasks:
+ # Check that there is only one etcd host
+ - assert:
+ that: groups.oo_etcd_to_config | default([]) | length == 1
+ msg: "[etcd] group must contain only one host"
+ # Check that there is only one master
+ - assert:
+ that: groups.oo_masters_to_config | default([]) | length == 1
+ msg: "[master] group must contain only one host"
+
+# 1. stop a master
+- name: Prepare masters for etcd data migration
+ hosts: oo_first_master
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Check that the master API is ready
+ include_role:
+ name: openshift_master
+ tasks_from: check_master_api_is_ready
+ - set_fact:
+ master_service: "{{ openshift.common.service_type + '-master' }}"
+ embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ - debug:
+ msg: "master service name: {{ master_service }}"
+ - name: Stop master
+ service:
+ name: "{{ master_service }}"
+ state: stopped
+ # 2. backup embedded etcd
+ # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.archive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+# 3. deploy certificates (for etcd and master)
+- include: ca.yml
+
+- include: server_certificates.yml
+
+- name: Back up etcd client certificates for the master host
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_master_etcd_certificates
+
+- name: Redeploy master etcd certificates
+ include: master_etcd_certificates.yml
+ vars:
+ etcd_certificates_redeploy: "{{ true }}"
+
+# 4. deploy external etcd
+- include: ../openshift-etcd/config.yml
+
+# 5. stop external etcd
+- name: Cleanse etcd
+ hosts: oo_etcd_to_config[0]
+ gather_facts: no
+ pre_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ - include_role:
+ name: etcd
+ tasks_from: clean_data
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+
+# 6. copy the embedded etcd backup to the external host
+# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory
+- name: Copy embedded etcd backup to the external host
+ hosts: localhost
+ tasks:
+ - name: Create local temp directory for syncing etcd backup
+ local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
+ register: g_etcd_client_mktemp
+ changed_when: False
+ become: no
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.fetch
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_first_master.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_first_master[0] }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.copy
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_etcd_to_config[0] }}"
+
+ - debug:
+ msg: "etcd_backup_dest_directory: {{ g_etcd_client_mktemp.stdout }}"
+
+ - name: Delete temporary directory
+ local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
+ changed_when: False
+ become: no
+
+# 7. force new cluster from the backup
+- name: Force new etcd cluster
+ hosts: oo_etcd_to_config[0]
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup.unarchive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.force_new_cluster
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+
+# 8. re-configure master to use the external etcd
+- name: Configure master to use external etcd
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: configure_external_etcd
+ vars:
+ etcd_peer_url_scheme: "https"
+ etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}"
+ etcd_peer_port: 2379
+
+ # 9. start the master
+ - name: Start master
+ service:
+ name: "{{ master_service }}"
+ state: started
+ register: service_status
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
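The pre-migrate assertions above restrict this playbook to the simplest topology: exactly one master and exactly one host in the [etcd] group. A minimal inventory satisfying them, in the repo's usual INI style with hypothetical hostnames:

    [masters]
    master.example.com

    [etcd]
    etcd.example.com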
diff --git a/playbooks/common/openshift-etcd/master_etcd_certificates.yml b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
new file mode 100644
index 000000000..0a25aac57
--- /dev/null
+++ b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
@@ -0,0 +1,14 @@
+---
+- name: Create etcd client certificates for master hosts
+ hosts: oo_masters_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index 2456ad3a8..31362f2f6 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -1,4 +1,17 @@
---
+- name: Check if the master has embedded etcd
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - fail:
+ msg: "Migration of an embedded etcd is not supported. Please, migrate the embedded etcd into an external etcd first."
+ when:
+ - groups.oo_etcd_to_config | default([]) | length == 0
+
- name: Run pre-checks
hosts: oo_etcd_to_migrate
tasks:
@@ -60,12 +73,11 @@
hosts: oo_etcd_to_migrate
gather_facts: no
pre_tasks:
- - set_fact:
- l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
- - name: Disable etcd members
- service:
- name: "{{ l_etcd_service }}"
- state: stopped
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index b5ba2bbba..20061366c 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -46,7 +46,7 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_initial_cluster_state: "existing"
- initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
+ etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
etcd_ca_setup: False
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
@@ -71,7 +71,7 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config']))
+ | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
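The rename fixes a variable that apparently never reached the etcd role, which consumes etcd_initial_cluster. The value itself is scraped from `etcdctl member add` output, which prints shell-style ETCD_* assignment lines; the two regex_replace filters strip the variable name and the surrounding quotes. A sketch with a hypothetical captured line:

    - set_fact:
        example_line: 'ETCD_INITIAL_CLUSTER="etcd1=https://192.0.2.10:2380,etcd2=https://192.0.2.11:2380"'
    - debug:
        msg: "{{ example_line | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
    # -> etcd1=https://192.0.2.10:2380,etcd2=https://192.0.2.11:2380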
diff --git a/playbooks/common/openshift-etcd/server_certificates.yml b/playbooks/common/openshift-etcd/server_certificates.yml
new file mode 100644
index 000000000..10e06747b
--- /dev/null
+++ b/playbooks/common/openshift-etcd/server_certificates.yml
@@ -0,0 +1,15 @@
+---
+- name: Create etcd server certificates for etcd hosts
+ hosts: oo_etcd_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: server_certificates
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 516618de2..c2ae5f313 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -1,7 +1,6 @@
---
- name: GlusterFS Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set GlusterFS install 'In Progress'
@@ -18,6 +17,11 @@
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_is_native | default(True) | bool
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+ when:
+ - openshift_storage_glusterfs_is_native | default(True) | bool
- name: Open firewall ports for GlusterFS registry nodes
hosts: glusterfs_registry
@@ -27,6 +31,11 @@
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_registry_is_native | default(True) | bool
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+ when:
+ - openshift_storage_glusterfs_registry_is_native | default(True) | bool
- name: Configure GlusterFS
hosts: oo_first_master
@@ -37,8 +46,7 @@
when: groups.oo_glusterfs_to_config | default([]) | count > 0
- name: GlusterFS Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set GlusterFS install 'Complete'
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index ecbb092bc..2a703cb61 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -1,7 +1,6 @@
---
- name: Load Balancer Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set load balancer install 'In Progress'
@@ -10,6 +9,15 @@
installer_phase_loadbalancer: "In Progress"
aggregate: false
+- name: Configure firewall and docker for load balancers
+ hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
+ vars:
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_docker
+ when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
+
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
@@ -25,12 +33,11 @@
+ openshift_loadbalancer_additional_backends | default([]) }}"
openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
- - role: os_firewall
- role: openshift_loadbalancer
+ - role: tuned
- name: Load Balancer Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set load balancer install 'Complete'
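The new pre-play targets oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config, Ansible's pattern-exclusion syntax: ':' chains patterns and ':!' subtracts a group, so load balancers co-located with a master or node are skipped and do not get os_firewall and openshift_docker applied twice. A tiny sketch with hypothetical groups:

    - name: Pattern exclusion example (hypothetical groups)
      hosts: lb:!masters:!nodes   # members of 'lb' in neither 'masters' nor 'nodes'
      gather_facts: false
      tasks:
        - debug:
            msg: "runs only on standalone load balancers"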
diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..facb3a5b9
--- /dev/null
+++ b/playbooks/common/openshift-management/add_container_provider.yml
@@ -0,0 +1,8 @@
+---
+- name: Add Container Provider to Management
+ hosts: oo_first_master
+ tasks:
+ - name: Run the Management Integration Tasks
+ include_role:
+ name: openshift_management
+ tasks_from: add_container_provider
diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml
new file mode 100644
index 000000000..908679e81
--- /dev/null
+++ b/playbooks/common/openshift-management/config.yml
@@ -0,0 +1,35 @@
+---
+- name: Management Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_management: "In Progress"
+ aggregate: false
+
+- name: Set up CFME
+ hosts: oo_first_master
+ pre_tasks:
+ - name: Create a temporary place to evaluate the PV templates
+ command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: r_openshift_management_mktemp
+ changed_when: false
+
+ tasks:
+ - name: Run the CFME Setup Role
+ include_role:
+ name: openshift_management
+ vars:
+ template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
+
+- name: Management Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'Complete'
+ set_stats:
+ data:
+ installer_phase_management: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-management/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/common/openshift-cfme/filter_plugins
+++ b/playbooks/common/openshift-management/filter_plugins
diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-management/library
index ba40d2f56..ba40d2f56 120000
--- a/playbooks/common/openshift-cfme/library
+++ b/playbooks/common/openshift-management/library
diff --git a/playbooks/common/openshift-management/roles b/playbooks/common/openshift-management/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-management/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml
index 78b8e7668..9f35cc276 100644
--- a/playbooks/common/openshift-cfme/uninstall.yml
+++ b/playbooks/common/openshift-management/uninstall.yml
@@ -1,8 +1,8 @@
---
- name: Uninstall CFME
- hosts: masters
+ hosts: masters[0]
tasks:
- name: Run the CFME Uninstall Role Tasks
include_role:
- name: openshift_cfme
+ name: openshift_management
tasks_from: uninstall
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index ee76e2ed7..350557f19 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -1,7 +1,6 @@
---
- name: Master Additional Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master Additional install 'In Progress'
@@ -26,10 +25,10 @@
- role: openshift_hosted_templates
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_manageiq
- when: openshift_use_manageiq | default(false) | bool
+ when: openshift_use_manageiq | default(true) | bool
- role: cockpit
when:
- - openshift.common.is_atomic
+ - not openshift.common.is_atomic | bool
- deployment_type == 'openshift-enterprise'
- osm_use_cockpit is undefined or osm_use_cockpit | bool
- openshift.common.deployment_subtype != 'registry'
@@ -37,8 +36,7 @@
when: openshift_use_flannel | default(false) | bool
- name: Master Additional Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master Additional install 'Complete'
diff --git a/playbooks/common/openshift-master/ca.yml b/playbooks/common/openshift-master/ca.yml
deleted file mode 100644
index 5bb796fa3..000000000
--- a/playbooks/common/openshift-master/ca.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Create OpenShift CA
- hosts: oo_masters_to_config
- roles:
- - role: openshift_master_facts
- - role: openshift_named_certificates
- - role: openshift_ca
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 766e0e501..b359919ba 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,7 +1,6 @@
---
- name: Master Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master install 'In Progress'
@@ -10,6 +9,8 @@
installer_phase_master: "In Progress"
aggregate: false
+- include: certificates.yml
+
- name: Disable excluders
hosts: oo_masters_to_config
gather_facts: no
@@ -196,6 +197,7 @@
openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
+ - role: tuned
- role: nuage_ca
when: openshift_use_nuage | default(false) | bool
- role: nuage_common
@@ -204,6 +206,12 @@
when: openshift_use_nuage | default(false) | bool
- role: calico_master
when: openshift_use_calico | default(false) | bool
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: master
+ when: openshift_use_kuryr | default(false) | bool
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
@@ -224,8 +232,7 @@
r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Master Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master install 'Complete'
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index d007fac85..f4dc9df8a 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -47,8 +47,6 @@
- include: ../openshift-etcd/certificates.yml
-- include: ../openshift-master/certificates.yml
-
- include: ../openshift-master/config.yml
- include: ../openshift-loadbalancer/config.yml
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index 66303d6f7..ce672daf5 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -1,7 +1,6 @@
---
- name: NFS Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set NFS install 'In Progress'
@@ -17,8 +16,7 @@
- role: openshift_storage_nfs
- name: NFS Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set NFS install 'Complete'
diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml
index fe51ef833..ac757397b 100644
--- a/playbooks/common/openshift-node/additional_config.yml
+++ b/playbooks/common/openshift-node/additional_config.yml
@@ -19,10 +19,14 @@
- group_by:
key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }}
changed_when: False
+ # Create group for kuryr nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }}
+ changed_when: False
- include: etcd_client_config.yml
vars:
- openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv"
+ openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr"
- name: Additional node config
hosts: oo_nodes_use_flannel
@@ -50,3 +54,11 @@
- role: contiv
contiv_role: netplugin
when: openshift_use_contiv | default(false) | bool
+
+- name: Configure Kuryr node
+ hosts: oo_nodes_use_kuryr
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: node
+ when: openshift_use_kuryr | default(false) | bool
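The group_by-plus-ternary idiom above buckets hosts at runtime: nodes where openshift_use_kuryr is truthy land in oo_nodes_use_kuryr, everything else in the throwaway oo_nodes_use_nothing, which no later play targets. A condensed sketch of the two halves:

    - hosts: oo_nodes_to_config
      tasks:
        - group_by:
            key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }}
          changed_when: False

    - hosts: oo_nodes_use_kuryr   # empty, and skipped, when no node enables kuryr
      tasks:
        - debug:
            msg: "kuryr node configuration would run here"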
diff --git a/playbooks/common/openshift-node/clean_image.yml b/playbooks/common/openshift-node/clean_image.yml
new file mode 100644
index 000000000..38753d0af
--- /dev/null
+++ b/playbooks/common/openshift-node/clean_image.yml
@@ -0,0 +1,10 @@
+---
+- name: Configure nodes
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ tasks:
+ - name: Remove any ansible facts created during AMI creation
+ file:
+ path: "/etc/ansible/facts.d/{{ item }}"
+ state: absent
+ with_items:
+ - openshift.fact
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 6fd8aa6f1..4f8f98aef 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,7 +1,6 @@
---
- name: Node Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Node install 'In Progress'
@@ -10,6 +9,8 @@
installer_phase_node: "In Progress"
aggregate: false
+- include: certificates.yml
+
- include: setup.yml
- include: containerized_nodes.yml
@@ -23,8 +24,7 @@
- include: enable_excluders.yml
- name: Node Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Node install 'Complete'
diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/common/openshift-node/configure_nodes.yml
index c96e4921c..17259422d 100644
--- a/playbooks/common/openshift-node/configure_nodes.yml
+++ b/playbooks/common/openshift-node/configure_nodes.yml
@@ -13,4 +13,5 @@
roles:
- role: os_firewall
- role: openshift_node
+ - role: tuned
- role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
new file mode 100644
index 000000000..30651a1df
--- /dev/null
+++ b/playbooks/common/openshift-node/image_prep.yml
@@ -0,0 +1,24 @@
+---
+- name: Normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: Evaluate the groups
+ include: ../openshift-cluster/evaluate_groups.yml
+
+- name: Initialize the facts
+ include: ../openshift-cluster/initialize_facts.yml
+
+- name: Initialize the repositories
+ include: ../openshift-cluster/initialize_openshift_repos.yml
+
+- name: Run node config setup
+ include: setup.yml
+
+- name: Run node config
+ include: configure_nodes.yml
+
+- name: Re-enable excluders
+ include: enable_excluders.yml
+
+- name: Remove any undesired artifacts from the build
+ include: clean_image.yml