Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml | 2
-rw-r--r--  playbooks/adhoc/uninstall.yml | 15
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/hosted.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 18
-rw-r--r--  playbooks/aws/openshift-cluster/prerequisites.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml | 10
-rw-r--r--  playbooks/byo/config.yml | 3
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-certificates.yml | 42
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml | 18
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-master-certificates.yml | 10
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-node-certificates.yml | 10
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-router-certificates.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml | 7
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/README.md | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/README.md | 20
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml | 5
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 14
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 7
-rw-r--r--  playbooks/byo/openshift-node/network_manager.yml | 3
-rw-r--r--  playbooks/byo/openshift_facts.yml | 5
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 44
l---------  playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/redeploy-certificates/library | 1
l---------  playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/redeploy-certificates/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml | 10
l---------  playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 59
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 40
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 36
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 30
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 50
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 46
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 30
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 50
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 46
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 30
l---------  playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml | 20
l---------  playbooks/common/openshift-cluster/upgrades/v3_9/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 142
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 144
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 115
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml | 7
-rw-r--r--  playbooks/container-runtime/config.yml | 6
-rw-r--r--  playbooks/container-runtime/private/config.yml | 28
l---------  playbooks/container-runtime/private/roles | 1
-rw-r--r--  playbooks/deploy_cluster.yml | 46
-rw-r--r--  playbooks/gcp/provision.yml (renamed from playbooks/gcp/openshift-cluster/provision.yml) | 7
-rw-r--r--  playbooks/init/facts.yml | 14
-rw-r--r--  playbooks/init/main.yml | 3
-rw-r--r--  playbooks/openshift-etcd/private/ca.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/certificates-backup.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml) | 6
-rw-r--r--  playbooks/openshift-etcd/private/config.yml | 1
-rw-r--r--  playbooks/openshift-etcd/private/embedded2external.yml | 24
-rw-r--r--  playbooks/openshift-etcd/private/migrate.yml | 18
-rw-r--r--  playbooks/openshift-etcd/private/redeploy-ca.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml) | 12
-rw-r--r--  playbooks/openshift-etcd/private/redeploy-certificates.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml) | 8
-rw-r--r--  playbooks/openshift-etcd/private/scaleup.yml | 4
-rw-r--r--  playbooks/openshift-etcd/private/server_certificates.yml | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_backup.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/backup.yml) | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_image_members.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml) | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_main.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/main.yml) | 8
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_rpm_members.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml) | 2
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_step.yml (renamed from playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml) | 24
-rw-r--r--  playbooks/openshift-etcd/redeploy-ca.yml | 4
-rw-r--r--  playbooks/openshift-etcd/redeploy-certificates.yml | 10
-rw-r--r--  playbooks/openshift-etcd/upgrade.yml | 4
-rw-r--r--  playbooks/openshift-glusterfs/private/registry.yml | 31
-rw-r--r--  playbooks/openshift-hosted/private/cockpit-ui.yml | 2
-rw-r--r--  playbooks/openshift-hosted/private/create_persistent_volumes.yml | 4
-rw-r--r--  playbooks/openshift-hosted/private/redeploy-registry-certificates.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/registry.yml) | 0
-rw-r--r--  playbooks/openshift-hosted/private/redeploy-router-certificates.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/router.yml) | 0
-rw-r--r--  playbooks/openshift-hosted/redeploy-registry-certificates.yml | 4
-rw-r--r--  playbooks/openshift-hosted/redeploy-router-certificates.yml | 4
-rw-r--r--  playbooks/openshift-loadbalancer/private/config.yml | 9
-rw-r--r--  playbooks/openshift-logging/config.yml (renamed from playbooks/byo/openshift-cluster/openshift-logging.yml) | 4
-rw-r--r--  playbooks/openshift-logging/private/config.yml (renamed from playbooks/common/openshift-cluster/openshift_logging.yml) | 0
l---------  playbooks/openshift-logging/private/filter_plugins | 1
l---------  playbooks/openshift-logging/private/library | 1
l---------  playbooks/openshift-logging/private/lookup_plugins | 1
l---------  playbooks/openshift-logging/private/roles | 1
-rw-r--r--  playbooks/openshift-master/private/additional_config.yml | 2
-rw-r--r--  playbooks/openshift-master/private/certificates-backup.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml) | 0
-rw-r--r--  playbooks/openshift-master/private/config.yml | 5
-rw-r--r--  playbooks/openshift-master/private/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/openshift-master/private/redeploy-openshift-ca.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml) | 4
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml | 4
-rw-r--r--  playbooks/openshift-master/private/tasks/wire_aggregator.yml | 6
-rw-r--r--  playbooks/openshift-master/private/validate_restart.yml | 3
-rw-r--r--  playbooks/openshift-master/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/openshift-master/redeploy-openshift-ca.yml | 4
-rw-r--r--  playbooks/openshift-nfs/private/config.yml | 1
-rw-r--r--  playbooks/openshift-node/private/certificates-backup.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml) | 0
-rw-r--r--  playbooks/openshift-node/private/configure_nodes.yml | 2
-rw-r--r--  playbooks/openshift-node/private/containerized_nodes.yml | 2
-rw-r--r--  playbooks/openshift-node/private/enable_excluders.yml | 1
-rw-r--r--  playbooks/openshift-node/private/image_prep.yml | 12
-rw-r--r--  playbooks/openshift-node/private/network_manager.yml | 4
-rw-r--r--  playbooks/openshift-node/private/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/openshift-node/private/restart.yml | 12
-rw-r--r--  playbooks/openshift-node/private/setup.yml | 1
-rw-r--r--  playbooks/openshift-node/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/openstack/README.md | 11
-rw-r--r--  playbooks/openstack/advanced-configuration.md | 152
-rw-r--r--  playbooks/openstack/openshift-cluster/install.yml | 7
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml | 13
-rw-r--r--  playbooks/openstack/openshift-cluster/provision_install.yml | 6
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/OSEv3.yml | 3
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/all.yml | 25
-rw-r--r--  playbooks/prerequisites.yml | 17
-rw-r--r--  playbooks/redeploy-certificates.yml | 26
l---------  playbooks/roles | 1
143 files changed, 1093 insertions(+), 798 deletions(-)
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
index 44a2ef534..69b2541bb 100644
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -8,7 +8,7 @@
hosts: masters:!masters[0]
pre_tasks:
- set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
tasks:
- include_role:
name: openshift_logging
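Note on the fix above: the fallback hostname is built with Jinja2's default() filter plus the ~ string-concatenation operator, and the missing closing parenthesis was the bug. A minimal, self-contained sketch of the same expression (the subdomain value is an assumed example):

```yaml
---
# Standalone repro of the set_fact above; the subdomain value is an assumed example.
- hosts: localhost
  gather_facts: false
  vars:
    openshift_master_default_subdomain: apps.example.com
  tasks:
    - set_fact:
        openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
    - debug:
        var: openshift_logging_kibana_hostname  # -> kibana.apps.example.com
```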
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 5ed55a817..ed7a7bd1a 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -62,7 +62,6 @@
- origin-master
- origin-master-api
- origin-master-controllers
- - pcsd
failed_when: false
- hosts: etcd
@@ -126,10 +125,14 @@
- origin-sdn-ovs
- tuned-profiles-openshift-node
- tuned-profiles-origin-node
+ register: result
+ until: result | success
- name: Remove flannel package
package: name=flannel state=absent
when: openshift_use_flannel | default(false) | bool
+ register: result
+ until: result | success
when: not is_atomic | bool
- shell: systemctl reset-failed
@@ -380,8 +383,8 @@
- origin-excluder
- origin-docker-excluder
- origin-master
- - pacemaker
- - pcs
+ register: result
+ until: result | success
- shell: systemctl reset-failed
changed_when: False
@@ -450,8 +453,6 @@
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
- /usr/share/openshift/examples
- - /var/lib/pacemaker
- - /var/lib/pcsd
- /usr/lib/systemd/system/atomic-openshift-master-api.service
- /usr/lib/systemd/system/atomic-openshift-master-controllers.service
- /usr/lib/systemd/system/origin-master-api.service
@@ -497,6 +498,8 @@
with_items:
- etcd
- etcd3
+ register: result
+ until: result | success
- shell: systemctl reset-failed
changed_when: False
@@ -554,6 +557,8 @@
when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- haproxy
+ register: result
+ until: result | success
- shell: systemctl reset-failed
changed_when: False
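The register/until pairs added throughout this playbook wrap package actions in Ansible's retry loop, so a transient repository failure no longer aborts the uninstall. A hedged sketch of the idiom (package name borrowed from the play above; the retries/delay values written out here are Ansible's defaults):

```yaml
- name: Remove a package, retrying transient repo errors
  package:
    name: haproxy          # example package from the play above
    state: absent
  register: result
  until: result | success  # 2.4-era "filter as test" syntax, matching this diff
  retries: 3               # Ansible's default, made explicit
  delay: 5                 # likewise the default
```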
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index 5815c4975..5bf4f652a 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -17,7 +17,7 @@
- name: openshift_aws_region
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-- include: provision_instance.yml
+- import_playbook: provision_instance.yml
vars:
openshift_aws_node_group_type: compute
@@ -33,8 +33,8 @@
# This is the part that installs all of the software and configs for the instance
# to become a node.
-- include: ../../openshift-node/private/image_prep.yml
+- import_playbook: ../../openshift-node/private/image_prep.yml
-- include: seal_ami.yml
+- import_playbook: seal_ami.yml
vars:
openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
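The include: to import_playbook: conversions in this and the following files track Ansible 2.4, which deprecated the bare include statement in favor of static imports (import_playbook, import_tasks) and dynamic includes (include_tasks). At playbook level only the static form applies, as in this sketch using names from the diff above:

```yaml
---
# Static import: resolved at parse time, so the target path must exist up
# front and cannot be templated with per-host data.
- import_playbook: provision_instance.yml
  vars:
    openshift_aws_node_group_type: compute
```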
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
index da7ec9d21..9d9ed29de 100644
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ b/playbooks/aws/openshift-cluster/hosted.yml
@@ -1,19 +1,19 @@
---
-- include: ../../openshift-hosted/private/config.yml
+- import_playbook: ../../openshift-hosted/private/config.yml
-- include: ../../openshift-metrics/private/config.yml
+- import_playbook: ../../openshift-metrics/private/config.yml
when: openshift_metrics_install_metrics | default(false) | bool
-- include: ../../common/openshift-cluster/openshift_logging.yml
+- import_playbook: ../../openshift-logging/private/config.yml
when: openshift_logging_install_logging | default(false) | bool
-- include: ../../openshift-prometheus/private/config.yml
+- import_playbook: ../../openshift-prometheus/private/config.yml
when: openshift_hosted_prometheus_deploy | default(false) | bool
-- include: ../../openshift-service-catalog/private/config.yml
+- import_playbook: ../../openshift-service-catalog/private/config.yml
when: openshift_enable_service_catalog | default(false) | bool
-- include: ../../openshift-management/private/config.yml
+- import_playbook: ../../openshift-management/private/config.yml
when: openshift_management_install_management | default(false) | bool
- name: Print deprecated variable warning message if necessary
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index f8206529a..b03fb0b7f 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -16,31 +16,31 @@
tasks_from: master_facts.yml
- name: run the init
- include: ../../init/main.yml
+ import_playbook: ../../init/main.yml
- name: perform the installer openshift-checks
- include: ../../openshift-checks/private/install.yml
+ import_playbook: ../../openshift-checks/private/install.yml
- name: etcd install
- include: ../../openshift-etcd/private/config.yml
+ import_playbook: ../../openshift-etcd/private/config.yml
- name: include nfs
- include: ../../openshift-nfs/private/config.yml
+ import_playbook: ../../openshift-nfs/private/config.yml
when: groups.oo_nfs_to_config | default([]) | count > 0
- name: include loadbalancer
- include: ../../openshift-loadbalancer/private/config.yml
+ import_playbook: ../../openshift-loadbalancer/private/config.yml
when: groups.oo_lb_to_config | default([]) | count > 0
- name: include openshift-master config
- include: ../../openshift-master/private/config.yml
+ import_playbook: ../../openshift-master/private/config.yml
- name: include master additional config
- include: ../../openshift-master/private/additional_config.yml
+ import_playbook: ../../openshift-master/private/additional_config.yml
- name: include openshift-node config
- include: ../../openshift-node/private/config.yml
+ import_playbook: ../../openshift-node/private/config.yml
- name: include openshift-glusterfs
- include: ../../openshift-glusterfs/private/config.yml
+ import_playbook: ../../openshift-glusterfs/private/config.yml
when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
index f5eb01b14..0afcce331 100644
--- a/playbooks/aws/openshift-cluster/prerequisites.yml
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -1,6 +1,6 @@
---
-- include: provision_vpc.yml
+- import_playbook: provision_vpc.yml
-- include: provision_ssh_keypair.yml
+- import_playbook: provision_ssh_keypair.yml
-- include: provision_sec_group.yml
+- import_playbook: provision_sec_group.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index 78dd6a49b..f98f5be9a 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -4,16 +4,16 @@
# this playbook is run with the following parameters:
# ansible-playbook -i openshift-ansible-inventory provision_install.yml
- name: Include the provision.yml playbook to create cluster
- include: provision.yml
+ import_playbook: provision.yml
- name: Include the install.yml playbook to install cluster on masters
- include: install.yml
+ import_playbook: install.yml
- name: provision the infra/compute playbook to install node resources
- include: provision_nodes.yml
+ import_playbook: provision_nodes.yml
- name: Include the accept.yml playbook to accept nodes into the cluster
- include: accept.yml
+ import_playbook: accept.yml
- name: Include the hosted.yml playbook to finish the hosted configuration
- include: hosted.yml
+ import_playbook: hosted.yml
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
index 7d03914a2..4b74e5bce 100644
--- a/playbooks/byo/config.yml
+++ b/playbooks/byo/config.yml
@@ -1,2 +1,3 @@
---
-- include: openshift-cluster/config.yml
+# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated.
+- import_playbook: ../deploy_cluster.yml
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
deleted file mode 100644
index 57823847b..000000000
--- a/playbooks/byo/openshift-cluster/config.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/config.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
deleted file mode 100644
index c26f11772..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml
- vars:
- g_check_expiry_hosts: 'oo_etcd_to_config'
-
-- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml
-
-- include: ../../openshift-etcd/private/certificates.yml
- vars:
- etcd_certificates_redeploy: true
-
-- include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml
-
-- include: ../../openshift-master/private/certificates.yml
- vars:
- openshift_certificates_redeploy: true
-
-- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml
-
-- include: ../../openshift-node/private/certificates.yml
- vars:
- openshift_certificates_redeploy: true
-
-- include: ../../openshift-etcd/private/restart.yml
- vars:
- g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
-
-- include: ../../openshift-master/private/restart.yml
-
-- include: ../../openshift-node/private/restart.yml
-
-- include: ../../common/openshift-cluster/redeploy-certificates/router.yml
- when: openshift_hosted_manage_router | default(true) | bool
-
-- include: ../../common/openshift-cluster/redeploy-certificates/registry.yml
- when: openshift_hosted_manage_registry | default(true) | bool
-
-- include: ../../openshift-master/private/revert-client-ca.yml
-
-- include: ../../openshift-master/private/restart.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
deleted file mode 100644
index 77dd121b3..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
deleted file mode 100644
index 94e50cc28..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml
- vars:
- g_check_expiry_hosts: 'oo_etcd_to_config'
-
-- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml
-
-- include: ../../openshift-etcd/private/certificates.yml
- vars:
- etcd_certificates_redeploy: true
-
-- include: ../../openshift-etcd/private/restart.yml
- vars:
- g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
-
-- include: ../../openshift-master/private/restart.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
deleted file mode 100644
index 88e52f809..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml
-
-- include: ../../openshift-master/private/certificates.yml
- vars:
- openshift_certificates_redeploy: true
-
-- include: ../../openshift-master/private/restart.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
deleted file mode 100644
index 2abbe78f1..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml
-
-- include: ../../openshift-node/private/certificates.yml
- vars:
- openshift_certificates_redeploy: true
-
-- include: ../../openshift-node/private/restart.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
deleted file mode 100644
index 9cebeb1ee..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
deleted file mode 100644
index 36b6250a7..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/redeploy-certificates/registry.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
deleted file mode 100644
index 181e03381..000000000
--- a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-cluster/redeploy-certificates/router.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index c46b22331..76308465c 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,5 +1,5 @@
---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
-- include: ../../../../init/evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
deleted file mode 100644
index a9be8dec4..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../../init/evaluate_groups.yml
-
-- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
index c880fe7f7..0effc68bf 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index aeec5f5cc..ebced5413 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 4664a9a2b..f2e97fc01 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
index cbb89bc4d..f6fedfdff 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 1adfbdec0..b8b5f5762 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index b4da18281..c63f11b30 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
new file mode 100644
index 000000000..23a3fcbb5
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
@@ -0,0 +1,7 @@
+---
+#
+# Node Scale Group Upgrade Playbook
+#
+# Upgrades scale group nodes only.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md
index d9be6ae3b..815e49c28 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md
@@ -1,4 +1,4 @@
-# v3.6 Major and Minor Upgrade Playbook
+# v3.8 Major and Minor Upgrade Playbook
## Overview
This playbook currently performs the following steps.
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
index f7e5dd1d2..c4094aa7e 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -2,4 +2,4 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index cc04d81c1..5a3aa6288 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,4 +11,4 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 37a9f69bb..74981cc31 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -4,4 +4,4 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md
new file mode 100644
index 000000000..0ab3d3a52
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md
@@ -0,0 +1,20 @@
+# v3.9 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
+```
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
new file mode 100644
index 000000000..a2a9d59f2
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -0,0 +1,5 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
new file mode 100644
index 000000000..869e185af
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -0,0 +1,14 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
new file mode 100644
index 000000000..a5867434b
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -0,0 +1,7 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml
deleted file mode 100644
index ca09fb65c..000000000
--- a/playbooks/byo/openshift-node/network_manager.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated.
-- include: ../../openshift-node/network_manager.yml
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 29e0ebe8d..85a65b7e1 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,13 +1,12 @@
---
-- include: ../init/main.yml
+- import_playbook: ../init/main.yml
- name: Gather Cluster facts
hosts: oo_all_hosts
roles:
- openshift_facts
tasks:
- - openshift_facts:
- openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}"
+ - openshift_facts: {}
register: result
- debug:
var: result
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 261143080..5a877809a 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,5 +1,5 @@
---
-- include: ../init/evaluate_groups.yml
+- import_playbook: ../init/evaluate_groups.yml
- name: Subscribe hosts, update repos and update OS packages
hosts: oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
deleted file mode 100644
index 2eeb81b86..000000000
--- a/playbooks/common/openshift-cluster/config.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- include: ../../openshift-checks/private/install.yml
-
-- include: ../../openshift-etcd/private/config.yml
-
-- include: ../../openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- include: ../../openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- include: ../../openshift-master/private/config.yml
-
-- include: ../../openshift-master/private/additional_config.yml
-
-- include: ../../openshift-node/private/config.yml
-
-- include: ../../openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- include: ../../openshift-hosted/private/config.yml
-
-- include: ../../openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- include: openshift_logging.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- include: ../../openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- include: ../../openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(true) | bool
-
-- include: ../../openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins
deleted file mode 120000
index b1213dedb..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/library b/playbooks/common/openshift-cluster/redeploy-certificates/library
deleted file mode 120000
index 9a53f009d..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins
deleted file mode 120000
index aff753026..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/roles b/playbooks/common/openshift-cluster/redeploy-certificates/roles
deleted file mode 120000
index 4bdbcbad3..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
index 800621857..33ed6a283 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
index a66301c0d..ab3171c9a 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 6d4ddf011..5c6def484 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,11 +1,11 @@
---
-- include: ../../../../init/evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
@@ -19,7 +19,7 @@
msg: Cannot upgrade Docker on Atomic operating systems.
when: openshift.common.is_atomic | bool
- - include: upgrade_check.yml
+ - include_tasks: upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -51,7 +51,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
@@ -59,7 +59,7 @@
retries: 60
delay: 60
- - include: tasks/upgrade.yml
+ - include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
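The drain change above swaps the retired standalone admin binary (openshift.common.admin_binary, i.e. oadm) for the consolidated oc adm subcommand; the flags are unchanged. Resolved onto a concrete node, the task looks roughly like this (host name and config path are illustrative):

```yaml
# Illustrative only: concrete values substituted for the templated expressions.
- name: Drain Node for Kubelet upgrade
  command: >
    oc adm drain node-1.example.com
    --config=/etc/origin/master/admin.kubeconfig
    --force --delete-local-data --ignore-daemonsets
  delegate_to: "{{ groups.oo_first_master.0 }}"
```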
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 83f16ac0d..dbc4f39c7 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -6,18 +6,14 @@
retries: 3
delay: 30
-- name: Update docker facts
- openshift_facts:
- role: docker
-
- name: Restart containerized services
service: name={{ item }} state=started
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
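This hunk belongs to a wider migration from the openshift.common.service_type fact to the plain openshift_service_type variable, which lets unit names be computed without running fact collection first. The rendered names are unchanged; a sketch assuming the Origin distribution:

```yaml
# With openshift_service_type: origin this starts origin-master-api,
# origin-master-controllers and origin-node.
- service:
    name: "{{ openshift_service_type }}-{{ item }}"
    state: started
  with_items:
    - master-api
    - master-controllers
    - node
```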
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 808cc562c..4856a4b51 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,9 +4,9 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
- etcd_container
- openvswitch
failed_when: false
@@ -41,6 +41,8 @@
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
+ register: result
+ until: result | success
-- include: restart.yml
+- include_tasks: restart.yml
when: not skip_docker_restart | default(False) | bool
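include_tasks (used above for restart.yml) is the dynamic counterpart to import_playbook: it is evaluated at run time, so a false when: skips parsing the file entirely, and the path itself may be templated. A sketch; the l_docker_task_file variable is hypothetical:

```yaml
# Runtime include: the file is parsed only if the condition holds, and the
# path may be templated (l_docker_task_file is a hypothetical variable).
- include_tasks: "tasks/{{ l_docker_task_file | default('upgrade') }}.yml"
  when: l_docker_upgrade is defined and l_docker_upgrade | bool
```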
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 9981d905b..5454a6680 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,11 +1,11 @@
---
-- include: ../../../init/evaluate_groups.yml
+- import_playbook: ../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../../../init/facts.yml
+- import_playbook: ../../../init/facts.yml
- name: Ensure firewall is not switched during upgrade
hosts: oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index c458184c9..344ddea3c 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -114,7 +114,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 6d8503879..18a08eb99 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
@@ -1,7 +1,7 @@
---
# Only check if docker upgrade is required if docker_upgrade is not
# already set to False.
-- include: ../../docker/upgrade_check.yml
+- include_tasks: ../../docker/upgrade_check.yml
when:
- docker_upgrade is not defined or (docker_upgrade | bool)
- not (openshift.common.is_atomic | bool)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 6a5bc24f7..bef95546d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -13,21 +13,21 @@
block:
- set_fact:
master_services:
- - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift_service_type }}-master"
# In case of the non-ha to ha upgrade.
- - name: Check if the {{ openshift.common.service_type }}-master-api.service exists
+ - name: Check if the {{ openshift_service_type }}-master-api.service exists
command: >
- systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend
+ systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
register: master_api_service_status
- set_fact:
master_services:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
when:
- master_api_service_status.stdout_lines | length > 0
- - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+ - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
- name: Ensure Master is running
service:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 446f315d6..96f970506 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -6,7 +6,7 @@
- name: Update oreg_auth docker login credentials if necessary
include_role:
- name: docker
+ name: container_runtime
tasks_from: registry_auth.yml
when: oreg_auth_user is defined
@@ -21,7 +21,7 @@
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift.common.service_type }}"
+ name: "{{ openshift_service_type }}"
ignore_excluders: true
register: repoquery_out
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index d7a52707c..37fc8a0f6 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -16,8 +16,8 @@
local_facts:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-- name: Upgrade and backup etcd
- include: ./etcd/main.yml
+- name: Backup and upgrade etcd
+ import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
# Create service signer cert when missing. Service signer certificate
# is added to master config in the master_config_upgrade hook.
@@ -30,7 +30,7 @@
register: service_signer_cert_stat
changed_when: false
-- include: create_service_signer_cert.yml
+- import_playbook: create_service_signer_cert.yml
# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
@@ -71,7 +71,7 @@
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: "{{ openshift_master_upgrade_pre_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- include_role:
@@ -82,20 +82,20 @@
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: "{{ openshift_master_upgrade_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: ../../../openshift-master/private/tasks/restart_hosts.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
- - include: ../../../openshift-master/private/tasks/restart_services.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
# Run the post-upgrade hook if defined:
- debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- - include: "{{ openshift_master_upgrade_post_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- name: Post master upgrade - Upgrade clusterpolicies storage
@@ -143,10 +143,6 @@
roles:
- { role: openshift_cli }
vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
- # restart.
- skip_docker_role: True
__master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
@@ -279,7 +275,7 @@
roles:
- openshift_facts
tasks:
- - include: docker/tasks/upgrade.yml
+ - include_tasks: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
@@ -309,7 +305,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
until: not l_upgrade_control_plane_drain_result | failed
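The upgrade hooks in this file have to stay dynamic: the hook file name comes from an inventory variable that may be undefined, which a parse-time import_tasks cannot tolerate. Hence include_tasks guarded by when:, as in:

```yaml
# The hook path is user-supplied; include_tasks defers resolving it until the
# task actually runs, so an undefined variable is safely skipped.
- include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
  when: openshift_master_upgrade_pre_hook is defined
```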
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 75ffd3fe9..f7a85545b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,7 +26,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
@@ -45,7 +45,6 @@
name: openshift_excluder
vars:
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
new file mode 100644
index 000000000..47410dff3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -0,0 +1,59 @@
+---
+- name: create new scale group
+ hosts: localhost
+ tasks:
+ - name: build upgrade scale groups
+ include_role:
+ name: openshift_aws
+ tasks_from: upgrade_node_group.yml
+
+ - fail:
+ msg: "Ensure that new scale groups were provisioned before proceeding to update."
+ when:
+ - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+
+- name: initialize upgrade bits
+ import_playbook: init.yml
+
+- name: Drain and upgrade nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+
+ pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: ../roles/lib_openshift
+
+ # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+ # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+ - name: Mark node unschedulable
+ oc_adm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
+
+ - name: Drain Node for Kubelet upgrade
+ command: >
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not l_upgrade_nodes_drain_result | failed
+ retries: 60
+ delay: 60
+
+# Alright, let's clean up!
+- name: clean up the old scale group
+ hosts: localhost
+ tasks:
+ - name: clean up scale group
+ include_role:
+ name: openshift_aws
+ tasks_from: remove_scale_group.yml
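Note: as the comment in the play says, `openshift_upgrade_nodes_serial` is evaluated before per-host inventory vars are loaded, so it only takes effect when passed with `-e`. A hypothetical invocation, assuming an inventory file named `hosts`:

    # ansible-playbook -i hosts \
    #     playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml \
    #     -e openshift_upgrade_nodes_serial="20%" \
    #     -e openshift_upgrade_nodes_max_fail_percentage=10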
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 6cb6a665f..9f9399ff9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,7 +17,7 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -43,27 +43,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -73,35 +73,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -113,12 +107,12 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
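Note: the blanket `include:` to `import_playbook:` swap in this and the following files tracks the Ansible 2.4 deprecation of bare `include`. `import_playbook` is static, so the `pre_upgrade` tag is applied at parse time and `--tags`/`--skip-tags` filtering behaves predictably; at task level the same split appears as `include_tasks` (dynamic) versus `import_tasks` (static). The resulting shape, as used throughout:

    - import_playbook: ../pre/verify_inventory_vars.yml
      tags:
      - pre_upgrade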
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 8f48bedcc..7374160d6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,7 +25,7 @@
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -51,23 +51,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,35 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -117,10 +111,10 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index f25cfe0d0..de9bf098e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -66,12 +66,6 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
tags:
@@ -80,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -110,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 2b99568c7..0c1a99272 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,11 +17,11 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -47,27 +47,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,35 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -117,9 +111,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -128,15 +122,15 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
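Note: the controller-restart plays also drop the `openshift.common.service_type` fact in favor of the `openshift_service_type` variable. A hedged inventory sketch; on clusters of this vintage the value is typically `origin` or, for OCP, `atomic-openshift`, which yields unit names such as origin-master-controllers:

    # Hypothetical inventory line; defaulting is handled by the init plays.
    openshift_service_type: origin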
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index d3d2046e6..9dcad352c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,11 +25,11 @@
openshift_upgrade_min: '3.6'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -55,23 +55,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -81,35 +81,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -121,9 +115,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -132,13 +126,13 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index c0546bd2d..27a7f67ea 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: '3.6'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -66,12 +66,6 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
tags:
@@ -80,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -110,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index b602cdd0e..ead2efbd0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,11 +17,11 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -47,27 +47,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,35 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -117,9 +111,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -128,15 +122,15 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index da81e6dea..ae37b1359 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,11 +25,11 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -55,23 +55,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -81,35 +81,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -121,9 +115,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -132,13 +126,13 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index abd56e762..dd716b241 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -66,12 +66,6 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
tags:
@@ -80,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -110,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins
new file mode 120000
index 000000000..7de3c1dd7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
new file mode 100644
index 000000000..1d4d1919c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -0,0 +1,20 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.election.lockName'
+ yaml_value: 'openshift-master-controllers'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
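Note: after the four `modify_yaml` tasks above run, the touched portions of master-config.yaml should look roughly like this (all other keys omitted; a sketch, not the full file):

    servingInfo:
      clientCA: ca.crt
    controllerConfig:
      election:
        lockName: openshift-master-controllers
      serviceServingCert:
        signer:
          certFile: service-signer.crt
          keyFile: service-signer.key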
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/roles b/playbooks/common/openshift-cluster/upgrades/v3_9/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/roles
@@ -0,0 +1 @@
+../../../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
new file mode 100644
index 000000000..eb688f189
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -0,0 +1,142 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+# Pre-upgrade
+
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on all upgrade hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join(',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- import_playbook: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../../../../init/version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include_tasks: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - import_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: validator.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include_tasks: ../cleanup_unused_images.yml
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../upgrade_nodes.yml
+
+- import_playbook: ../post_control_plane.yml
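Note: everything above the "Pre-upgrade completed" marker carries the `pre_upgrade` tag, so the checks can be rehearsed without touching the cluster. A hypothetical dry run, assuming an inventory file named `hosts`:

    # ansible-playbook -i hosts \
    #     playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml \
    #     --tags pre_upgrade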
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
new file mode 100644
index 000000000..983bb4a63
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -0,0 +1,144 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join(',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- import_playbook: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../../../../init/version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include_tasks: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: validator.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include_tasks: ../cleanup_unused_images.yml
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
new file mode 100644
index 000000000..d95cfa4e1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -0,0 +1,115 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join(',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- import_playbook: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../../../../init/version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- import_playbook: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include_tasks: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include_tasks: ../cleanup_unused_images.yml
+
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
new file mode 100644
index 000000000..4bd2d87b1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.9 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - debug: msg="noop"
diff --git a/playbooks/container-runtime/config.yml b/playbooks/container-runtime/config.yml
new file mode 100644
index 000000000..f15aa771f
--- /dev/null
+++ b/playbooks/container-runtime/config.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+ vars:
+ skip_version: True
+
+- import_playbook: private/config.yml
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
new file mode 100644
index 000000000..67445edeb
--- /dev/null
+++ b/playbooks/container-runtime/private/config.yml
@@ -0,0 +1,28 @@
+---
+- hosts: "{{ l_containerized_host_groups }}"
+ vars:
+ l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
+ l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
+ # role: container_runtime is necessary here to bring role default variables
+ # into the play scope.
+ roles:
+ - role: container_runtime
+ tasks:
+ - include_role:
+ name: container_runtime
+ tasks_from: package_docker.yml
+ when:
+ - not openshift_docker_use_system_container | bool
+ - not openshift_use_crio_only | bool
+ - include_role:
+ name: container_runtime
+ tasks_from: systemcontainer_docker.yml
+ when:
+ - openshift_docker_use_system_container | bool
+ - not openshift_use_crio_only | bool
+ - include_role:
+ name: container_runtime
+ tasks_from: systemcontainer_crio.yml
+ when:
+ - openshift_use_crio | bool
+ - openshift_docker_is_node_or_master | bool
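Note: which of the three task files actually runs is decided purely by inventory booleans; the `role:` entry exists only to pull the role defaults into scope, as the comment in the play says. A hedged sketch of the switches involved:

    # Hypothetical inventory choices:
    openshift_use_crio: True                       # runs systemcontainer_crio.yml
    openshift_use_crio_only: False                 # keep managing docker alongside CRI-O
    openshift_docker_use_system_container: False   # plain package docker, not a system container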
diff --git a/playbooks/container-runtime/private/roles b/playbooks/container-runtime/private/roles
new file mode 120000
index 000000000..148b13206
--- /dev/null
+++ b/playbooks/container-runtime/private/roles
@@ -0,0 +1 @@
+../../roles/ \ No newline at end of file
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
new file mode 100644
index 000000000..0e6bde09a
--- /dev/null
+++ b/playbooks/deploy_cluster.yml
@@ -0,0 +1,46 @@
+---
+- import_playbook: init/main.yml
+
+- import_playbook: openshift-checks/private/install.yml
+
+- import_playbook: openshift-etcd/private/config.yml
+
+- import_playbook: openshift-nfs/private/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: openshift-loadbalancer/private/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: openshift-master/private/config.yml
+
+- import_playbook: openshift-master/private/additional_config.yml
+
+- import_playbook: openshift-node/private/config.yml
+
+- import_playbook: openshift-glusterfs/private/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: openshift-hosted/private/config.yml
+
+- import_playbook: openshift-metrics/private/config.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: openshift-logging/private/config.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: openshift-prometheus/private/config.yml
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: openshift-service-catalog/private/config.yml
+ when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: openshift-management/private/config.yml
+ when: openshift_management_install_management | default(false) | bool
+
+- name: Print deprecated variable warning message if necessary
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - debug: msg="{{__deprecation_message}}"
+ when:
+ - __deprecation_message | default ('') | length > 0
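Note: the new `deploy_cluster.yml` gives installs a single top-level entry point in place of the older byo wrapper chain, importing each component's private config play in order. A hypothetical invocation, assuming an inventory file named `hosts`:

    # ansible-playbook -i hosts playbooks/deploy_cluster.yml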
diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/provision.yml
index 097717607..6016e6a78 100644
--- a/playbooks/gcp/openshift-cluster/provision.yml
+++ b/playbooks/gcp/provision.yml
@@ -9,8 +9,5 @@
include_role:
name: openshift_gcp
-- name: run the init
- include: ../../init/main.yml
-
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
+- name: run the cluster deploy
+ import_playbook: ../deploy_cluster.yml
diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml
index 05142f9b6..d41f365dc 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/facts.yml
@@ -84,6 +84,8 @@
- "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- yum-utils
+ register: result
+ until: result | success
- name: Ensure various deps for running system containers are installed
package:
@@ -100,6 +102,8 @@
or (openshift_use_openvswitch_system_container | default(False)) | bool
or (openshift_use_node_system_container | default(False)) | bool
or (openshift_use_master_system_container | default(False)) | bool
+ register: result
+ until: result | success
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
@@ -131,11 +135,13 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
+ - name: Initialize openshift.node.sdn_mtu
+ openshift_facts:
+ role: node
+ local_facts:
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+
- name: initialize_facts set_fact repoquery command
set_fact:
repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}"
-
- - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
- set_fact:
- openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
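Note: the added `register`/`until: result | success` pairs make the package installs retry, using Ansible's defaults of three attempts with a short delay. The counts can also be made explicit; a sketch with assumed values:

    - name: Install dependencies, retrying transient repo failures
      package:
        name: yum-utils
        state: present
      register: result
      until: result | success
      retries: 3
      delay: 5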
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 1d4f41ffc..b2b972a7d 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -18,12 +18,15 @@
- import_playbook: facts.yml
- import_playbook: sanity_checks.yml
+ when: not (skip_sanity_checks | default(False))
- import_playbook: validate_hostnames.yml
+ when: not (skip_validate_hostnames | default(False))
- import_playbook: repos.yml
- import_playbook: version.yml
+ when: not (skip_version | default(False))
- name: Initialization Checkpoint End
hosts: all
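Note: the new guards let callers skip the optional init phases entirely; the container-runtime entry point above sets `skip_version` the same way. A hypothetical invocation, assuming `deploy_cluster.yml` as the entry point and an inventory file named `hosts`:

    # ansible-playbook -i hosts playbooks/deploy_cluster.yml \
    #     -e skip_sanity_checks=True \
    #     -e skip_validate_hostnames=True \
    #     -e skip_version=True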
diff --git a/playbooks/openshift-etcd/private/ca.yml b/playbooks/openshift-etcd/private/ca.yml
index c9f186e72..f3bb3c2d1 100644
--- a/playbooks/openshift-etcd/private/ca.yml
+++ b/playbooks/openshift-etcd/private/ca.yml
@@ -7,7 +7,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: ca
+ tasks_from: ca.yml
vars:
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml
index d738c8207..ce21a1f96 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
+++ b/playbooks/openshift-etcd/private/certificates-backup.yml
@@ -5,10 +5,10 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_generated_certificates
+ tasks_from: backup_generated_certificates.yml
- include_role:
name: etcd
- tasks_from: remove_generated_certificates
+ tasks_from: remove_generated_certificates.yml
- name: Backup deployed etcd certificates
hosts: oo_etcd_to_config
@@ -16,4 +16,4 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_server_certificates
+ tasks_from: backup_server_certificates.yml
diff --git a/playbooks/openshift-etcd/private/config.yml b/playbooks/openshift-etcd/private/config.yml
index 3d6c79834..35407969e 100644
--- a/playbooks/openshift-etcd/private/config.yml
+++ b/playbooks/openshift-etcd/private/config.yml
@@ -19,7 +19,6 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml
index 514319b88..be177b714 100644
--- a/playbooks/openshift-etcd/private/embedded2external.yml
+++ b/playbooks/openshift-etcd/private/embedded2external.yml
@@ -20,9 +20,9 @@
- name: Check the master API is ready
include_role:
name: openshift_master
- tasks_from: check_master_api_is_ready
+ tasks_from: check_master_api_is_ready.yml
- set_fact:
- master_service: "{{ openshift.common.service_type + '-master' }}"
+ master_service: "{{ openshift_service_type + '-master' }}"
embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- debug:
msg: "master service name: {{ master_service }}"
@@ -34,7 +34,7 @@
# Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
- include_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_embedded_etcd: "{{ true }}"
@@ -42,7 +42,7 @@
- include_role:
name: etcd
- tasks_from: backup.archive
+ tasks_from: backup.archive.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_embedded_etcd: "{{ true }}"
@@ -58,7 +58,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_master_etcd_certificates
+ tasks_from: backup_master_etcd_certificates.yml
- name: Redeploy master etcd certificates
import_playbook: master_etcd_certificates.yml
@@ -75,10 +75,10 @@
pre_tasks:
- include_role:
name: etcd
- tasks_from: disable_etcd
+ tasks_from: disable_etcd.yml
- include_role:
name: etcd
- tasks_from: clean_data
+ tasks_from: clean_data.yml
# 6. copy the embedded etcd backup to the external host
# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory
@@ -93,7 +93,7 @@
- include_role:
name: etcd
- tasks_from: backup.fetch
+ tasks_from: backup.fetch.yml
vars:
etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
r_etcd_common_backup_tag: pre-migrate
@@ -103,7 +103,7 @@
- include_role:
name: etcd
- tasks_from: backup.copy
+ tasks_from: backup.copy.yml
vars:
etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
r_etcd_common_backup_tag: pre-migrate
@@ -124,14 +124,14 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup.unarchive
+ tasks_from: backup.unarchive.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
- include_role:
name: etcd
- tasks_from: backup.force_new_cluster
+ tasks_from: backup.force_new_cluster.yml
vars:
r_etcd_common_backup_tag: pre-migrate
r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
@@ -145,7 +145,7 @@
tasks:
- include_role:
name: openshift_master
- tasks_from: configure_external_etcd
+ tasks_from: configure_external_etcd.yml
vars:
etcd_peer_url_scheme: "https"
etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}"
diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index 4269918c2..9ddb4afe2 100644
--- a/playbooks/openshift-etcd/private/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -17,7 +17,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate.pre_check
+ tasks_from: migrate.pre_check.yml
vars:
r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ ansible_default_ipv4.address }}"
@@ -28,8 +28,8 @@
tasks:
- set_fact:
master_services:
- - "{{ openshift.common.service_type + '-master-controllers' }}"
- - "{{ openshift.common.service_type + '-master-api' }}"
+ - "{{ openshift_service_type + '-master-controllers' }}"
+ - "{{ openshift_service_type + '-master-api' }}"
- debug:
msg: "master service name: {{ master_services }}"
- name: Stop masters
@@ -46,7 +46,7 @@
post_tasks:
- include_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: pre-migration
r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
@@ -74,7 +74,7 @@
pre_tasks:
- include_role:
name: etcd
- tasks_from: disable_etcd
+ tasks_from: disable_etcd.yml
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
@@ -82,7 +82,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate
+ tasks_from: migrate.yml
vars:
r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
@@ -95,7 +95,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: clean_data
+ tasks_from: clean_data.yml
vars:
r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ openshift.common.ip }}"
@@ -132,7 +132,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate.add_ttls
+ tasks_from: migrate.add_ttls.yml
vars:
etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"
etcd_url_scheme: "https"
@@ -144,7 +144,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: migrate.configure_master
+ tasks_from: migrate.configure_master.yml
when: etcd_migration_failed | length == 0
- debug:
msg: "Skipping master re-configuration since migration failed."
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml
index 438f704bc..158bcb849 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
+++ b/playbooks/openshift-etcd/private/redeploy-ca.yml
@@ -16,12 +16,12 @@
tasks:
- include_role:
name: etcd
- tasks_from: backup_ca_certificates
+ tasks_from: backup_ca_certificates.yml
- include_role:
name: etcd
- tasks_from: remove_ca_certificates
+ tasks_from: remove_ca_certificates.yml
-- include: ../../../openshift-etcd/private/ca.yml
+- import_playbook: ca.yml
- name: Create temp directory for syncing certs
hosts: localhost
@@ -44,7 +44,7 @@
etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-- include: ../../../openshift-etcd/private/restart.yml
+- import_playbook: restart.yml
# Do not restart etcd when etcd certificates were previously expired.
when: ('expired' not in (hostvars
| oo_select_keys(groups['etcd'])
@@ -56,7 +56,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: retrieve_ca_certificates
+ tasks_from: retrieve_ca_certificates.yml
vars:
etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
@@ -82,7 +82,7 @@
state: absent
changed_when: false
-- include: ../../../openshift-master/private/restart.yml
+- import_playbook: ../../openshift-master/private/restart.yml
# Do not restart masters when master or etcd certificates were previously expired.
when:
# masters
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/openshift-etcd/private/redeploy-certificates.yml
index 4a9fbf7eb..1c8eb27ac 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
+++ b/playbooks/openshift-etcd/private/redeploy-certificates.yml
@@ -1,6 +1,6 @@
---
- name: Check cert expirys
- hosts: "{{ g_check_expiry_hosts }}"
+ hosts: oo_etcd_to_config
vars:
openshift_certificate_expiry_show_all: yes
roles:
@@ -10,3 +10,9 @@
# this playbook. Service restarts will be skipped if any
# certificates were previously expired.
- role: openshift_certificate_expiry
+
+- import_playbook: certificates-backup.yml
+
+- import_playbook: certificates.yml
+ vars:
+ etcd_certificates_redeploy: true
diff --git a/playbooks/openshift-etcd/private/scaleup.yml b/playbooks/openshift-etcd/private/scaleup.yml
index fac8e3f02..3ef043ec8 100644
--- a/playbooks/openshift-etcd/private/scaleup.yml
+++ b/playbooks/openshift-etcd/private/scaleup.yml
@@ -32,7 +32,7 @@
until: etcd_add_check.rc == 0
- include_role:
name: etcd
- tasks_from: server_certificates
+ tasks_from: server_certificates.yml
vars:
etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
@@ -78,4 +78,4 @@
post_tasks:
- include_role:
name: openshift_master
- tasks_from: update_etcd_client_urls
+ tasks_from: update_etcd_client_urls.yml
diff --git a/playbooks/openshift-etcd/private/server_certificates.yml b/playbooks/openshift-etcd/private/server_certificates.yml
index 14c74baf3..695b53990 100644
--- a/playbooks/openshift-etcd/private/server_certificates.yml
+++ b/playbooks/openshift-etcd/private/server_certificates.yml
@@ -7,7 +7,7 @@
post_tasks:
- include_role:
name: etcd
- tasks_from: server_certificates
+ tasks_from: server_certificates.yml
vars:
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml
index 531175c85..22ed7e610 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/openshift-etcd/private/upgrade_backup.yml
@@ -6,7 +6,7 @@
post_tasks:
- include_role:
name: etcd
- tasks_from: backup
+ tasks_from: backup.yml
vars:
r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml
index 6fca42bd0..c133c0201 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml
@@ -8,7 +8,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: upgrade_image
+ tasks_from: upgrade_image.yml
vars:
r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
etcd_peer: "{{ openshift.common.hostname }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml
index 5b8ba3bb2..e373a4a4c 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/openshift-etcd/private/upgrade_main.yml
@@ -6,7 +6,7 @@
# available in the repos. So for Fedora we'll simply skip this, sorry.
- name: Backup etcd before upgrading anything
- include: backup.yml
+ import_playbook: upgrade_backup.yml
vars:
etcd_backup_tag: "pre-upgrade-"
when: openshift_etcd_backup | default(true) | bool
@@ -16,14 +16,14 @@
tasks:
- include_role:
name: etcd
- tasks_from: drop_etcdctl
+ tasks_from: drop_etcdctl.yml
- name: Perform etcd upgrade
- include: ./upgrade.yml
+ import_playbook: upgrade_step.yml
when: openshift_etcd_upgrade | default(true) | bool
- name: Backup etcd
- include: backup.yml
+ import_playbook: upgrade_backup.yml
vars:
etcd_backup_tag: "post-3.0-"
when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
index 51e8786b3..902c39d9c 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
+++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml
@@ -8,7 +8,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: upgrade_rpm
+ tasks_from: upgrade_rpm.yml
vars:
r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
etcd_peer: "{{ openshift.common.hostname }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/openshift-etcd/private/upgrade_step.yml
index c5ff4133c..60127fc68 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/openshift-etcd/private/upgrade_step.yml
@@ -6,47 +6,47 @@
name: etcd
tasks_from: version_detect.yml
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '2.1'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '2.2'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '2.2.5'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '2.3'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '2.3.7'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '3.0'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '3.0.15'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '3.1'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '3.1.3'
-- include: upgrade_rpm_members.yml
+- import_playbook: upgrade_rpm_members.yml
vars:
etcd_upgrade_version: '3.2'
-- include: upgrade_image_members.yml
+- import_playbook: upgrade_image_members.yml
vars:
etcd_upgrade_version: '3.2.7'
@@ -56,7 +56,7 @@
tasks:
- include_role:
name: etcd
- tasks_from: upgrade_image
+ tasks_from: upgrade_image.yml
vars:
etcd_peer: "{{ openshift.common.hostname }}"
when:
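The `include:` to `import_playbook:` switch repeated through this hunk is more than a rename: `import_playbook` (Ansible 2.4+) makes the static composition explicit and is validated when the playbook is parsed, while the bare `include:` form is deprecated. A sketch of the resulting step chain, with per-import vars as used above:

```yaml
---
# Sketch of the static step chain: each import is resolved at parse time,
# and the vars apply to every play inside the imported playbook.
- import_playbook: upgrade_rpm_members.yml
  vars:
    etcd_upgrade_version: '2.1'

- import_playbook: upgrade_image_members.yml
  vars:
    etcd_upgrade_version: '2.2.5'
```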
diff --git a/playbooks/openshift-etcd/redeploy-ca.yml b/playbooks/openshift-etcd/redeploy-ca.yml
new file mode 100644
index 000000000..769d694ba
--- /dev/null
+++ b/playbooks/openshift-etcd/redeploy-ca.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-ca.yml
diff --git a/playbooks/openshift-etcd/redeploy-certificates.yml b/playbooks/openshift-etcd/redeploy-certificates.yml
new file mode 100644
index 000000000..753878d70
--- /dev/null
+++ b/playbooks/openshift-etcd/redeploy-certificates.yml
@@ -0,0 +1,10 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-certificates.yml
+
+- import_playbook: private/restart.yml
+ vars:
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
+
+- import_playbook: ../openshift-master/private/restart.yml
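The `g_etcd_certificates_expired` guard above chains two filter plugins shipped in this repo; a sketch of what the expression computes (it only runs where those plugins and an `etcd` inventory group are available):

```yaml
---
# Sketch: oo_select_keys picks the hostvars of the etcd group members,
# the first oo_collect pulls each host's check_results.check_results.etcd
# dict, the second collects the 'health' values, and the play reports
# whether any host recorded 'expired'.
- hosts: localhost
  gather_facts: false
  tasks:
    - debug:
        msg: "{{ ('expired' in (hostvars
                  | oo_select_keys(groups['etcd'])
                  | oo_collect('check_results.check_results.etcd')
                  | oo_collect('health'))) | bool }}"
```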
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
new file mode 100644
index 000000000..ccc797527
--- /dev/null
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/evaluate_groups.yml
+
+- import_playbook: private/upgrade_main.yml
diff --git a/playbooks/openshift-glusterfs/private/registry.yml b/playbooks/openshift-glusterfs/private/registry.yml
index 75c1f0300..917b729f9 100644
--- a/playbooks/openshift-glusterfs/private/registry.yml
+++ b/playbooks/openshift-glusterfs/private/registry.yml
@@ -1,40 +1,11 @@
---
- import_playbook: config.yml
-- name: Initialize GlusterFS registry PV and PVC vars
- hosts: oo_first_master
- tags: hosted
- tasks:
- - set_fact:
- glusterfs_pv: []
- glusterfs_pvc: []
-
- - set_fact:
- glusterfs_pv:
- - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
- capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
- access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
- storage:
- glusterfs:
- endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
- path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
- readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
- glusterfs_pvc:
- - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
- capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
- access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
- when: openshift.hosted.registry.storage.glusterfs.swap
-
- name: Create persistent volumes
hosts: oo_first_master
- tags:
- - hosted
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
roles:
- role: openshift_persistent_volumes
- when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
+ when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
- name: Create Hosted Resources
hosts: oo_first_master
diff --git a/playbooks/openshift-hosted/private/cockpit-ui.yml b/playbooks/openshift-hosted/private/cockpit-ui.yml
index 359132dd0..d6529425b 100644
--- a/playbooks/openshift-hosted/private/cockpit-ui.yml
+++ b/playbooks/openshift-hosted/private/cockpit-ui.yml
@@ -5,4 +5,4 @@
- role: cockpit-ui
when:
- openshift_hosted_manage_registry | default(true) | bool
- - not openshift.docker.hosted_registry_insecure | default(false) | bool
+ - not (openshift_docker_hosted_registry_insecure | default(false)) | bool
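One subtlety in the new condition: Jinja2 filters bind tighter than `not`, so the added parentheses only make the intended grouping explicit rather than changing behavior. A self-contained sketch of the evaluation:

```yaml
---
# Sketch: both the old and new spellings evaluate as
# not(bool(default(var, false))); the parentheses just state that grouping.
- hosts: localhost
  gather_facts: false
  tasks:
    - debug:
        msg: "hosted registry is served over TLS"
      when: not (openshift_docker_hosted_registry_insecure | default(false)) | bool
```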
diff --git a/playbooks/openshift-hosted/private/create_persistent_volumes.yml b/playbooks/openshift-hosted/private/create_persistent_volumes.yml
index 8a60a30b8..41ae2eb69 100644
--- a/playbooks/openshift-hosted/private/create_persistent_volumes.yml
+++ b/playbooks/openshift-hosted/private/create_persistent_volumes.yml
@@ -1,9 +1,5 @@
---
- name: Create Hosted Resources - persistent volumes
hosts: oo_first_master
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
roles:
- role: openshift_persistent_volumes
- when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
index 7e9363c5f..7e9363c5f 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
index 2116c745c..2116c745c 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
diff --git a/playbooks/openshift-hosted/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
new file mode 100644
index 000000000..518a1d624
--- /dev/null
+++ b/playbooks/openshift-hosted/redeploy-registry-certificates.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-registry-certificates.yml
diff --git a/playbooks/openshift-hosted/redeploy-router-certificates.yml b/playbooks/openshift-hosted/redeploy-router-certificates.yml
new file mode 100644
index 000000000..a74dd8c79
--- /dev/null
+++ b/playbooks/openshift-hosted/redeploy-router-certificates.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-router-certificates.yml
diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml
index d737b836b..2636d857e 100644
--- a/playbooks/openshift-loadbalancer/private/config.yml
+++ b/playbooks/openshift-loadbalancer/private/config.yml
@@ -11,15 +11,6 @@
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-- name: Configure firewall and docker for load balancers
- hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
- vars:
- openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
- roles:
- - role: os_firewall
- - role: openshift_docker
- when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
-
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/openshift-logging/config.yml
index 74e186f33..d71b4f1c5 100644
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ b/playbooks/openshift-logging/config.yml
@@ -4,6 +4,6 @@
# Hosted logging on. See inventory/byo/hosts.*.example for the
# currently supported method.
#
-- include: ../../init/main.yml
+- import_playbook: ../init/main.yml
-- include: ../../common/openshift-cluster/openshift_logging.yml
+- import_playbook: private/config.yml
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/openshift-logging/private/config.yml
index bc59bd95a..bc59bd95a 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/openshift-logging/private/config.yml
diff --git a/playbooks/openshift-logging/private/filter_plugins b/playbooks/openshift-logging/private/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/openshift-logging/private/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/openshift-logging/private/library b/playbooks/openshift-logging/private/library
new file mode 120000
index 000000000..ba40d2f56
--- /dev/null
+++ b/playbooks/openshift-logging/private/library
@@ -0,0 +1 @@
+../../../library
\ No newline at end of file
diff --git a/playbooks/openshift-logging/private/lookup_plugins b/playbooks/openshift-logging/private/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/openshift-logging/private/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/openshift-logging/private/roles b/playbooks/openshift-logging/private/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/openshift-logging/private/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index b7cfbe4e4..a90cd6b22 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -19,8 +19,6 @@
openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
roles:
- - role: openshift_master_cluster
- when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- role: openshift_project_request_template
when: openshift_project_request_template_manage
- role: openshift_examples
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml
index 4dbc041b0..4dbc041b0 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
+++ b/playbooks/openshift-master/private/certificates-backup.yml
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index afb8d6bd1..9f6d5afcc 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -19,7 +19,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
@@ -180,7 +179,6 @@
| oo_collect('openshift.common.ip') | default([]) | join(',')
}}"
roles:
- - role: os_firewall
- role: openshift_master_facts
- role: openshift_hosted_facts
- role: openshift_clock
@@ -228,6 +226,8 @@
- name: Configure API Aggregation on masters
hosts: oo_masters
serial: 1
+ roles:
+ - role: openshift_facts
tasks:
- include_tasks: tasks/wire_aggregator.yml
@@ -237,7 +237,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Master Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-master/private/redeploy-certificates.yml b/playbooks/openshift-master/private/redeploy-certificates.yml
new file mode 100644
index 000000000..c0f75ae80
--- /dev/null
+++ b/playbooks/openshift-master/private/redeploy-certificates.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: certificates-backup.yml
+
+- import_playbook: certificates.yml
+ vars:
+ openshift_certificates_redeploy: true
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
index 5a837d80d..2a190935e 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
@@ -207,7 +207,7 @@
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
-- include: ../../../openshift-master/private/restart.yml
+- import_playbook: restart.yml
# Do not restart masters when master or etcd certificates were previously expired.
when:
# masters
@@ -272,7 +272,7 @@
state: absent
changed_when: false
-- include: ../../../openshift-node/private/restart.yml
+- import_playbook: ../../openshift-node/private/restart.yml
# Do not restart nodes when node, master or etcd certificates were previously expired.
when:
# nodes
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 8229eccfa..007b23ea3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -20,11 +20,11 @@
- restart master controllers
handlers:
- name: restart master api
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ service: name={{ openshift_service_type }}-master-controllers state=restarted
notify: verify api server
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 97acc5d5d..4f55d5c82 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -180,21 +180,19 @@
#restart master serially here
- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
when:
- yedit_output.changed
- - openshift.master.cluster_method == 'native'
# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
retries: 3
delay: 5
register: result
until: result.rc == 0
when:
- yedit_output.changed
- - openshift.master.cluster_method == 'native'
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml
index 5dbb21502..1077d0b9c 100644
--- a/playbooks/openshift-master/private/validate_restart.yml
+++ b/playbooks/openshift-master/private/validate_restart.yml
@@ -14,9 +14,6 @@
- role: common
local_facts:
rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
- - role: master
- local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
# Creating a temp file on localhost, we then check each system that will
# be rebooted to see if that file exists, if so we know we're running
diff --git a/playbooks/openshift-master/redeploy-certificates.yml b/playbooks/openshift-master/redeploy-certificates.yml
new file mode 100644
index 000000000..8b7272485
--- /dev/null
+++ b/playbooks/openshift-master/redeploy-certificates.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-certificates.yml
+
+- import_playbook: private/restart.yml
diff --git a/playbooks/openshift-master/redeploy-openshift-ca.yml b/playbooks/openshift-master/redeploy-openshift-ca.yml
new file mode 100644
index 000000000..27f4e6b7d
--- /dev/null
+++ b/playbooks/openshift-master/redeploy-openshift-ca.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-openshift-ca.yml
diff --git a/playbooks/openshift-nfs/private/config.yml b/playbooks/openshift-nfs/private/config.yml
index 6ea77e00b..3625efcc6 100644
--- a/playbooks/openshift-nfs/private/config.yml
+++ b/playbooks/openshift-nfs/private/config.yml
@@ -14,7 +14,6 @@
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: os_firewall
- role: openshift_storage_nfs
- name: NFS Install Checkpoint End
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml b/playbooks/openshift-node/private/certificates-backup.yml
index 2ad84b3b9..2ad84b3b9 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
+++ b/playbooks/openshift-node/private/certificates-backup.yml
diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml
index 06f3df9fa..32b288c8b 100644
--- a/playbooks/openshift-node/private/configure_nodes.yml
+++ b/playbooks/openshift-node/private/configure_nodes.yml
@@ -4,14 +4,12 @@
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
| union(groups['oo_masters_to_config'])
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_node
- role: tuned
diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml
index 3c3ac3646..ef07669cb 100644
--- a/playbooks/openshift-node/private/containerized_nodes.yml
+++ b/playbooks/openshift-node/private/containerized_nodes.yml
@@ -5,7 +5,6 @@
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
| union(groups['oo_masters_to_config'])
| union(groups['oo_etcd_to_config'] | default([])))
@@ -13,7 +12,6 @@
}}"
roles:
- - role: os_firewall
- role: openshift_clock
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/openshift-node/private/enable_excluders.yml b/playbooks/openshift-node/private/enable_excluders.yml
index 5288b14f9..30713e694 100644
--- a/playbooks/openshift-node/private/enable_excluders.yml
+++ b/playbooks/openshift-node/private/enable_excluders.yml
@@ -5,4 +5,3 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/openshift-node/private/image_prep.yml b/playbooks/openshift-node/private/image_prep.yml
index b7ac27bda..6b517197d 100644
--- a/playbooks/openshift-node/private/image_prep.yml
+++ b/playbooks/openshift-node/private/image_prep.yml
@@ -1,12 +1,10 @@
---
- name: normalize groups
- import_playbook: ../../init/evaluate_groups.yml
-
-- name: initialize the facts
- import_playbook: ../../init/facts.yml
-
-- name: initialize the repositories
- import_playbook: ../../init/repos.yml
+ import_playbook: ../../prerequisites.yml
+ vars:
+ skip_version: True
+ skip_sanity_checks: True
+ skip_validate_hostnames: True
- name: run node config setup
import_playbook: setup.yml
diff --git a/playbooks/openshift-node/private/network_manager.yml b/playbooks/openshift-node/private/network_manager.yml
index fffed4dfb..39640345f 100644
--- a/playbooks/openshift-node/private/network_manager.yml
+++ b/playbooks/openshift-node/private/network_manager.yml
@@ -1,6 +1,4 @@
---
-- import_playbook: ../../init/evaluate_groups.yml
-
- name: Install and configure NetworkManager
hosts: oo_all_hosts
become: yes
@@ -9,6 +7,8 @@
package:
name: 'NetworkManager'
state: present
+ register: result
+ until: result | success
- name: configure NetworkManager
lineinfile:
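The `register`/`until` pair added above is the standard retry idiom for flaky package mirrors; a sketch with the retry knobs written out (the patch itself relies on the defaults, 3 retries at 5-second intervals):

```yaml
---
# Sketch of the retry idiom: re-run the package task until it succeeds.
# `result | success` is the pre-2.9 spelling; `result is success` is the
# modern equivalent.
- hosts: oo_all_hosts
  become: yes
  tasks:
    - name: Install NetworkManager with retries
      package:
        name: NetworkManager
        state: present
      register: result
      until: result | success
      retries: 5   # explicit here; defaults to 3 when omitted
      delay: 10    # seconds between attempts; defaults to 5
```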
diff --git a/playbooks/openshift-node/private/redeploy-certificates.yml b/playbooks/openshift-node/private/redeploy-certificates.yml
new file mode 100644
index 000000000..c0f75ae80
--- /dev/null
+++ b/playbooks/openshift-node/private/redeploy-certificates.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: certificates-backup.yml
+
+- import_playbook: certificates.yml
+ vars:
+ openshift_certificates_redeploy: true
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index c3beb59b7..0786bd7d3 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -16,10 +16,6 @@
retries: 3
delay: 30
- - name: Update docker facts
- openshift_facts:
- role: docker
-
- name: Restart containerized services
service:
name: "{{ item }}"
@@ -27,9 +23,9 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
@@ -44,7 +40,7 @@
- name: restart node
service:
- name: "{{ openshift.common.service_type }}-node"
+ name: "{{ openshift_service_type }}-node"
state: restarted
- name: Wait for node to be ready
diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml
index 794c03a67..541913aef 100644
--- a/playbooks/openshift-node/private/setup.yml
+++ b/playbooks/openshift-node/private/setup.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Evaluate node groups
hosts: localhost
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
new file mode 100644
index 000000000..8b7272485
--- /dev/null
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/redeploy-certificates.yml
+
+- import_playbook: private/restart.yml
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index c762169eb..f567242cd 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -24,7 +24,7 @@ The OpenStack release must be Newton (for Red Hat OpenStack this is
version 10) or newer. It must also satisfy these requirements:
* Heat (Orchestration) must be available
-* The deployment image (CentOS 7 or RHEL 7) must be loaded
+* The deployment image (CentOS 7.4 or RHEL 7) must be loaded
* The deployment flavor must be available to your user
- `m1.medium` / 4GB RAM + 40GB disk should be enough for testing
- look at
@@ -183,9 +183,14 @@ Then run the provision + install playbook -- this will create the OpenStack
resources:
```bash
-$ ansible-playbook --user openshift -i inventory openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml
+$ ansible-playbook --user openshift -i inventory \
+ openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml \
+ -e openshift_repos_enable_testing=true
```
+Note: the testing repo is intended for development purposes only;
+normally, `openshift_repos_enable_testing` should not be specified.
+
If you're using multiple inventories, make sure you pass the path to
the right one to `-i`.
@@ -210,7 +215,6 @@ advanced configuration:
* [External Dns][external-dns]
* Multiple Clusters (TODO)
* [Cinder Registry][cinder-registry]
-* [Bastion Node][bastion]
[ansible]: https://www.ansible.com/
@@ -229,4 +233,3 @@ advanced configuration:
[loadbalancer]: ./advanced-configuration.md#multi-master-configuration
[external-dns]: ./advanced-configuration.md#dns-configuration-variables
[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry
-[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index c0bdf5020..db2a13d38 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -23,35 +23,14 @@ There are no additional dependencies for the cluster nodes. Required
configuration steps are done by Heat given a specific user data config
that normally should not be changed.
-## Required galaxy modules
-
-In order to pull in external dependencies for DNS configuration steps,
-the following commads need to be executed:
-
- ansible-galaxy install \
- -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \
- -p openshift-ansible-contrib/roles
-
-Alternatively you can install directly from github:
-
- ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \
- -p openshift-ansible-contrib/roles
-
-Notes:
-* This assumes we're in the directory that contains the clonned
-openshift-ansible-contrib repo in its root path.
-* When trying to install a different version, the previous one must be removed first
-(`infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)).
-Otherwise, even if there are differences between the two versions, installation of the newer version is skipped.
-
-
## Accessing the OpenShift Cluster
### Configure DNS
-OpenShift requires two DNS records to function fully. The first one points to
+OpenShift requires two public DNS records to function fully. The first one points to
the master/load balancer and provides the UI/API access. The other one is a
-wildcard domain that resolves app route requests to the infra node.
+wildcard domain that resolves app route requests to the infra node. A private DNS
+server and records are neither required nor managed here.
If you followed the default installation from the README section, there is no
DNS configured. You should add two entries to the `/etc/hosts` file on the
@@ -187,8 +166,8 @@ That sudomain can be set as well by the `openshift_openstack_app_subdomain` vari
the inventory.
The `openstack_<role name>_hostname` is a set of variables used for customising
-hostnames of servers with a given role. When such a variable stays commented,
-default hostname (usually the role name) is used.
+public names of Nova servers provisioned with a given role. When such a variable is left commented,
+the default value (usually the role name) is used.
The `openshift_openstack_dns_nameservers` is a list of DNS servers accessible from all
the created Nova servers. These will provide the internal name resolution for
@@ -203,7 +182,7 @@ When Network Manager is enabled for provisioned cluster nodes, which is
normally the case, you should not change the defaults and always deploy dnsmasq.
`openshift_openstack_external_nsupdate_keys` describes an external authoritative DNS server(s)
-processing dynamic records updates in the public and private cluster views:
+processing dynamic record updates in the public-only cluster view:
openshift_openstack_external_nsupdate_keys:
public:
@@ -211,10 +190,6 @@ processing dynamic records updates in the public and private cluster views:
key_algorithm: 'hmac-md5'
key_name: 'update-key'
server: <public DNS server IP>
- private:
- key_secret: <some nsupdate key 2>
- key_algorithm: 'hmac-sha256'
- server: <public or private DNS server IP>
Here, for the public view section, we specified another key algorithm and
optional `key_name`, which normally defaults to the cluster's DNS domain.
@@ -222,24 +197,6 @@ This just illustrates a compatibility mode with a DNS service deployed
by OpenShift on OSP10 reference architecture, and used in a mixed mode with
another external DNS server.
-Another example defines an external DNS server for the public view
-additionally to the in-stack DNS server used for the private view only:
-
- openshift_openstack_external_nsupdate_keys:
- public:
- key_secret: <some nsupdate key>
- key_algorithm: 'hmac-sha256'
- server: <public DNS server IP>
-
-Here, updates matching the public view will be hitting the given public
-server IP. While updates matching the private view will be sent to the
-auto evaluated in-stack DNS server's **public** IP.
-
-Note, for the in-stack DNS server, private view updates may be sent only
-via the public IP of the server. You can not send updates via the private
-IP yet. This forces the in-stack private server to have a floating IP.
-See also the [security notes](#security-notes)
-
## Flannel networking
In order to configure the
@@ -328,14 +285,6 @@ The `openshift_openstack_required_packages` variable also provides a list of the
prerequisite packages to be installed before to deploy an OpenShift cluster.
Those are ignored though, if the `manage_packages: False`.
-The `openstack_inventory` controls either a static inventory will be created after the
-cluster nodes provisioned on OpenStack cloud. Note, the fully dynamic inventory
-is yet to be supported, so the static inventory will be created anyway.
-
-The `openstack_inventory_path` points the directory to host the generated static inventory.
-It should point to the copied example inventory directory, otherwise ti creates
-a new one for you.
-
## Multi-master configuration
Please refer to the official documentation for the
@@ -345,7 +294,6 @@ variables](https://docs.openshift.com/container-platform/3.6/install_config/inst
in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node
under the ansible group named `ext_lb`:
- openshift_master_cluster_method: native
openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}"
openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}"
@@ -384,18 +332,6 @@ be the case for development environments. When turned off, the servers will
be provisioned omitting the ``yum update`` command. This brings security
implications though, and is not recommended for production deployments.
-### DNS servers security options
-
-Aside from `openshift_openstack_node_ingress_cidr` restricting public access to in-stack DNS
-servers, there are following (bind/named specific) DNS security
-options available:
-
- named_public_recursion: 'no'
- named_private_recursion: 'yes'
-
-External DNS servers, which is not included in the 'dns' hosts group,
-are not managed. It is up to you to configure such ones.
-
## Configure the OpenShift parameters
Finally, you need to update the DNS entry in
@@ -538,43 +474,6 @@ You can also run the registry setup playbook directly:
-## Configure static inventory and access via a bastion node
-
-Example inventory variables:
-
- openshift_openstack_use_bastion: true
- openshift_openstack_bastion_ingress_cidr: "{{openshift_openstack_subnet_prefix}}.0/24"
- openstack_private_ssh_key: ~/.ssh/id_rsa
- openstack_inventory: static
- openstack_inventory_path: ../../../../inventory
- openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com
-
-The `openshift_openstack_subnet_prefix` is the openstack private network for your cluster.
-And the `openshift_openstack_bastion_ingress_cidr` defines accepted range for SSH connections to nodes
-additionally to the `openshift_openstack_ssh_ingress_cidr`` (see the security notes above).
-
-The SSH config will be stored on the ansible control node by the
-gitven path. Ansible uses it automatically. To access the cluster nodes with
-that ssh config, use the `-F` prefix, f.e.:
-
- ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK
-
-Note, relative paths will not work for the `openstack_ssh_config_path`, but it
-works for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this
-guide, the latter points to the current directory, where you run ansible commands
-from.
-
-To verify nodes connectivity, use the command:
-
- ansible -v -i inventory/hosts -m ping all
-
-If something is broken, double-check the inventory variables, paths and the
-generated `<openstack_inventory_path>/hosts` and `openstack_ssh_config_path` files.
-
-The `inventory: dynamic` can be used instead to access cluster nodes directly via
-floating IPs. In this mode you can not use a bastion node and should specify
-the dynamic inventory file in your ansible commands , like `-i openstack.py`.
-
## Using Docker on the Ansible host
If you don't want to worry about the dependencies, you can use the
@@ -604,28 +503,6 @@ the playbooks:
ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
-### Run the playbook
-
-Assuming your OpenStack (Keystone) credentials are in the `keystonerc`
-this is how you stat the provisioning process from your ansible control node:
-
- . keystonerc
- ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
-
-Note, here you start with an empty inventory. The static inventory will be populated
-with data so you can omit providing additional arguments for future ansible commands.
-
-If bastion enabled, the generates SSH config must be applied for ansible.
-Otherwise, it is auto included by the previous step. In order to execute it
-as a separate playbook, use the following command:
-
- ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml
-
-The first infra node then becomes a bastion node as well and proxies access
-for future ansible commands. The post-provision step also configures Satellite,
-if requested, and DNS server, and ensures other OpenShift requirements to be met.
-
-
## Running Custom Post-Provision Actions
A custom playbook can be run like this:
@@ -733,21 +610,6 @@ Once it succeeds, you can install openshift by running:
OpenShift UI may be accessed via the 1st master node FQDN, port 8443.
-When using a bastion, you may want to make an SSH tunnel from your control node
-to access UI on the `https://localhost:8443`, with this inventory variable:
-
- openshift_openstack_ui_ssh_tunnel: True
-
-Note, this requires sudo rights on the ansible control node and an absolute path
-for the `openstack_private_ssh_key`. You should also update the control node's
-`/etc/hosts`:
-
- 127.0.0.1 master-0.openshift.example.com
-
-In order to access UI, the ssh-tunnel service will be created and started on the
-control node. Make sure to remove these changes and the service manually, when not
-needed anymore.
-
## Scale Deployment up/down
### Scaling up
@@ -766,5 +628,3 @@ Usage:
```
ansible-playbook -i <path to inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=<number>] [-e openshift_ansible_dir=<path to openshift-ansible>]
```
-
-Note: This playbook works only without a bastion node (`openshift_openstack_use_bastion: False`).
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
index 1c4f609e3..3211f619a 100644
--- a/playbooks/openstack/openshift-cluster/install.yml
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -8,8 +8,5 @@
# values here. We do it in the OSEv3 group vars. Do we need to add
# some logic here?
-- name: run the initialization
- include: ../../init/main.yml
-
-- name: run the config
- include: ../../common/openshift-cluster/config.yml
+- name: run the cluster deploy
+ import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index 36d8c8215..583e72b51 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -10,7 +10,7 @@
# NOTE(shadower): Bring in the host groups:
- name: evaluate groups
- include: ../../init/evaluate_groups.yml
+ import_playbook: ../../init/evaluate_groups.yml
- name: Wait for the nodes and gather their facts
@@ -26,9 +26,9 @@
- name: Gather facts for the new nodes
setup:
+- name: set common facts
+ import_playbook: ../../init/facts.yml
-# NOTE(shadower): the (internal) DNS must be functional at this point!!
-# That will have happened in provision.yml if nsupdate was configured.
# TODO(shadower): consider splitting this up so people can stop here
# and configure their DNS if they have to.
@@ -47,6 +47,13 @@
hosts: oo_all_hosts
become: yes
gather_facts: yes
+ roles:
+ - role: rhel_subscribe
+ when:
+ - ansible_distribution == "RedHat"
+ - rhsub_user | default(False)
+ - rhsub_pass | default(False)
+
tasks:
- name: Install dependencies
include_role:
diff --git a/playbooks/openstack/openshift-cluster/provision_install.yml b/playbooks/openstack/openshift-cluster/provision_install.yml
index 5d88c105f..fc2854605 100644
--- a/playbooks/openstack/openshift-cluster/provision_install.yml
+++ b/playbooks/openstack/openshift-cluster/provision_install.yml
@@ -1,9 +1,9 @@
---
- name: Check the prerequisites for cluster provisioning in OpenStack
- include: prerequisites.yml
+ import_playbook: prerequisites.yml
- name: Include the provision.yml playbook to create cluster
- include: provision.yml
+ import_playbook: provision.yml
- name: Include the install.yml playbook to install cluster
- include: install.yml
+ import_playbook: install.yml
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index 90608bbc0..933117127 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -1,10 +1,11 @@
---
+## OpenShift product versions and repos to install from
openshift_deployment_type: origin
+#openshift_repos_enable_testing: true
#openshift_deployment_type: openshift-enterprise
#openshift_release: v3.5
openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
-openshift_master_cluster_method: native
openshift_master_cluster_public_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
osm_default_node_selector: 'region=primary'
diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml
index ae1528123..c7afe9a24 100644
--- a/playbooks/openstack/sample-inventory/group_vars/all.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/all.yml
@@ -82,27 +82,10 @@ openshift_openstack_docker_volume_size: "15"
openshift_openstack_subnet_prefix: "192.168.99"
-## Red Hat subscription defaults to false which means we will not attempt to
-## subscribe the nodes
-#rhsm_register: False
-
-# # Using Red Hat Satellite:
-#rhsm_register: True
-#rhsm_satellite: 'sat-6.example.com'
-#rhsm_org: 'OPENSHIFT_ORG'
-#rhsm_activationkey: '<activation-key>'
-
-# # Or using RHN username, password and optionally pool:
-#rhsm_register: True
-#rhsm_username: '<username>'
-#rhsm_password: '<password>'
-#rhsm_pool: '<pool id>'
-
-#rhsm_repos:
-# - "rhel-7-server-rpms"
-# - "rhel-7-server-ose-3.5-rpms"
-# - "rhel-7-server-extras-rpms"
-# - "rhel-7-fast-datapath-rpms"
+## Red Hat subscription:
+#rhsub_user: '<username>'
+#rhsub_pass: '<password>'
+#rhsub_pool: '<pool name>'
# # Roll-your-own DNS
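The three `rhsub_*` variables replace the whole removed `rhsm_*` block; an example of the new, smaller inventory stanza (all values are placeholders):

```yaml
## Red Hat subscription (placeholder values):
rhsub_user: 'alice'
rhsub_pass: 'changeme'
rhsub_pool: '<pool name>'   # optional
```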
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 582dfe794..7b7868cfe 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -1,7 +1,12 @@
---
-- name: Place holder for prerequisites
- hosts: localhost
- gather_facts: false
- tasks:
- - name: Debug placeholder
- debug: msg="Prerequisites ran."
+- import_playbook: init/main.yml
+ vars:
+ skip_version: True
+
+# This is required for the CRI-O container runtime and only needs to run once.
+- name: Configure os_firewall
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
+ roles:
+ - role: os_firewall
+
+- import_playbook: container-runtime/private/config.yml
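`prerequisites.yml` is now a real entry point rather than a placeholder; the intended ordering is to run it once ahead of the install, roughly:

```yaml
---
# Sketch of the intended call order (both playbooks exist in this repo):
# configure firewalls and the container runtime first, then install.
- import_playbook: prerequisites.yml

- import_playbook: deploy_cluster.yml
```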
diff --git a/playbooks/redeploy-certificates.yml b/playbooks/redeploy-certificates.yml
new file mode 100644
index 000000000..b5fcb951d
--- /dev/null
+++ b/playbooks/redeploy-certificates.yml
@@ -0,0 +1,26 @@
+---
+- import_playbook: init/main.yml
+
+- import_playbook: openshift-etcd/private/redeploy-certificates.yml
+
+- import_playbook: openshift-master/private/redeploy-certificates.yml
+
+- import_playbook: openshift-node/private/redeploy-certificates.yml
+
+- import_playbook: openshift-etcd/private/restart.yml
+ vars:
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}"
+
+- import_playbook: openshift-master/private/restart.yml
+
+- import_playbook: openshift-node/private/restart.yml
+
+- import_playbook: openshift-hosted/private/redeploy-router-certificates.yml
+ when: openshift_hosted_manage_router | default(true) | bool
+
+- import_playbook: openshift-hosted/private/redeploy-registry-certificates.yml
+ when: openshift_hosted_manage_registry | default(true) | bool
+
+- import_playbook: openshift-master/private/revert-client-ca.yml
+
+- import_playbook: openshift-master/private/restart.yml
diff --git a/playbooks/roles b/playbooks/roles
new file mode 120000
index 000000000..d8c4472ca
--- /dev/null
+++ b/playbooks/roles
@@ -0,0 +1 @@
+../roles
\ No newline at end of file