path: root/playbooks/common
Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-checks/adhoc.yml | 12
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 11
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 11
l---------  playbooks/common/openshift-checks/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/cockpit-ui.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 67
-rw-r--r--  playbooks/common/openshift-cluster/create_persistent_volumes.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml | 60
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 190
l---------  playbooks/common/openshift-cluster/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 156
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_repos.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 32
l---------  playbooks/common/openshift-cluster/library | 1
l---------  playbooks/common/openshift-cluster/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/openshift_default_storage_class.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 35
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_registry.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_router.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml | 33
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml | 34
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/openshift_provisioners.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml | 103
l---------  playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/redeploy-certificates/library | 1
l---------  playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml | 38
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml | 303
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/registry.yml | 102
l---------  playbooks/common/openshift-cluster/redeploy-certificates/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/router.yml | 141
-rw-r--r--  playbooks/common/openshift-cluster/sanity_checks.yml | 51
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml | 29
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml | 46
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_excluders.yml (renamed from playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml) | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 26
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh | 25
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 57
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 29
l---------  playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 29
l---------  playbooks/common/openshift-cluster/upgrades/etcd/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 108
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml | 19
l---------  playbooks/common/openshift-cluster/upgrades/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 11
l---------  playbooks/common/openshift-cluster/upgrades/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 77
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 94
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 38
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 183
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 48
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 66
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml | 173
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 66
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml | 20
l---------  playbooks/common/openshift-cluster/upgrades/v3_3/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 119
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 113
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml | 10
l---------  playbooks/common/openshift-cluster/upgrades/v3_4/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 116
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 119
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 111
l---------  playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 123
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml | 67
l---------  playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 117
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 117
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 111
l---------  playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 133
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 131
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 109
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml) | 0
l---------  playbooks/common/openshift-cluster/upgrades/v3_8/roles (renamed from playbooks/common/openshift-cluster/upgrades/v3_5/roles) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 59
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 64
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 38
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml) | 5
l---------  playbooks/common/openshift-cluster/upgrades/v3_9/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 55
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 65
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 34
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/validate_hostnames.yml | 23
-rw-r--r--  playbooks/common/openshift-etcd/ca.yml | 15
-rw-r--r--  playbooks/common/openshift-etcd/certificates.yml | 4
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 36
-rw-r--r--  playbooks/common/openshift-etcd/embedded2external.yml | 172
l---------  playbooks/common/openshift-etcd/filter_plugins | 1
l---------  playbooks/common/openshift-etcd/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-etcd/master_etcd_certificates.yml | 14
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 169
-rw-r--r--  playbooks/common/openshift-etcd/restart.yml | 27
l---------  playbooks/common/openshift-etcd/roles | 1
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 83
-rw-r--r--  playbooks/common/openshift-etcd/server_certificates.yml | 15
-rw-r--r--  playbooks/common/openshift-glusterfs/config.yml | 46
l---------  playbooks/common/openshift-glusterfs/filter_plugins | 1
l---------  playbooks/common/openshift-glusterfs/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-glusterfs/registry.yml | 49
l---------  playbooks/common/openshift-glusterfs/roles | 1
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 47
l---------  playbooks/common/openshift-loadbalancer/filter_plugins | 1
l---------  playbooks/common/openshift-loadbalancer/lookup_plugins | 1
l---------  playbooks/common/openshift-loadbalancer/roles | 1
-rw-r--r--  playbooks/common/openshift-management/config.yml | 35
l---------  playbooks/common/openshift-management/filter_plugins | 1
l---------  playbooks/common/openshift-management/library | 1
l---------  playbooks/common/openshift-management/roles | 1
-rw-r--r--  playbooks/common/openshift-management/uninstall.yml | 8
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml | 46
-rw-r--r--  playbooks/common/openshift-master/certificates.yml | 14
-rw-r--r--  playbooks/common/openshift-master/config.yml | 242
-rw-r--r--  playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js | 2
l---------  playbooks/common/openshift-master/filter_plugins | 1
l---------  playbooks/common/openshift-master/library | 1
l---------  playbooks/common/openshift-master/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-master/restart.yml | 19
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml | 40
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 22
l---------  playbooks/common/openshift-master/roles | 1
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 56
-rw-r--r--  playbooks/common/openshift-master/set_network_facts.yml | 34
-rw-r--r--  playbooks/common/openshift-master/tasks/wire_aggregator.yml | 215
-rw-r--r--  playbooks/common/openshift-master/validate_restart.yml | 65
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 26
l---------  playbooks/common/openshift-nfs/filter_plugins | 1
l---------  playbooks/common/openshift-nfs/lookup_plugins | 1
l---------  playbooks/common/openshift-nfs/roles | 1
-rw-r--r--  playbooks/common/openshift-node/additional_config.yml | 64
-rw-r--r--  playbooks/common/openshift-node/certificates.yml | 8
-rw-r--r--  playbooks/common/openshift-node/config.yml | 34
-rw-r--r--  playbooks/common/openshift-node/configure_nodes.yml | 17
-rw-r--r--  playbooks/common/openshift-node/containerized_nodes.yml | 19
-rw-r--r--  playbooks/common/openshift-node/enable_excluders.yml | 8
-rw-r--r--  playbooks/common/openshift-node/etcd_client_config.yml | 11
l---------  playbooks/common/openshift-node/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-node/image_prep.yml | 21
l---------  playbooks/common/openshift-node/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-node/manage_node.yml | 12
-rw-r--r--  playbooks/common/openshift-node/network_manager.yml | 28
-rw-r--r--  playbooks/common/openshift-node/restart.yml | 61
l---------  playbooks/common/openshift-node/roles | 1
-rw-r--r--  playbooks/common/openshift-node/setup.yml | 27
176 files changed, 847 insertions, 6173 deletions
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml
deleted file mode 100644
index dfcef8435..000000000
--- a/playbooks/common/openshift-checks/adhoc.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: OpenShift health checks
- hosts: oo_all_hosts
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: adhoc
- post_tasks:
- - name: Run health checks
- action: openshift_health_check
- args:
- checks: '{{ openshift_checks | default([]) }}'
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
deleted file mode 100644
index 21ea785ef..000000000
--- a/playbooks/common/openshift-checks/health.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Run OpenShift health checks
- hosts: oo_all_hosts
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: health
- post_tasks:
- - action: openshift_health_check
- args:
- checks: ['@health']
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
deleted file mode 100644
index 88e6f9120..000000000
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Run OpenShift pre-install checks
- hosts: oo_all_hosts
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: pre-install
- post_tasks:
- - action: openshift_health_check
- args:
- checks: ['@preflight']
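Note: all three check playbooks above drive the openshift_health_check action plugin provided by the openshift_health_checker role. A check list may name checks individually (as the install playbook in config.yml below does) or pull in a tagged group with the '@group' form seen in '@health' and '@preflight'. A minimal sketch mixing both forms, assuming they can be combined in one list:

---
- name: Run selected OpenShift checks
  hosts: oo_all_hosts
  roles:
  - openshift_health_checker
  vars:
  - r_openshift_health_checker_playbook_context: adhoc
  post_tasks:
  - action: openshift_health_check
    args:
      # one named check plus everything tagged 'preflight'
      checks: ['memory_availability', '@preflight']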
diff --git a/playbooks/common/openshift-checks/roles b/playbooks/common/openshift-checks/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/common/openshift-checks/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml
deleted file mode 100644
index 5ddafdb07..000000000
--- a/playbooks/common/openshift-cluster/cockpit-ui.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Create Hosted Resources - cockpit-ui
- hosts: oo_first_master
- roles:
- - role: cockpit-ui
- when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
deleted file mode 100644
index 395eb51f1..000000000
--- a/playbooks/common/openshift-cluster/config.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-# TODO: refactor this into its own include
-# and pass a variable for ctx
-- name: Verify Requirements
- hosts: oo_all_hosts
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: install
- post_tasks:
-
- - name: Verify Requirements - EL
- when: ansible_distribution != "Fedora"
- action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - package_availability
- - package_version
- - docker_image_availability
- - docker_storage
- - name: Verify Requirements - Fedora
- when: ansible_distribution == "Fedora"
- action: openshift_health_check
- args:
- checks:
- - docker_image_availability
-
-- include: ../openshift-etcd/config.yml
-
-- include: ../openshift-nfs/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- include: ../openshift-loadbalancer/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- include: ../openshift-master/config.yml
-
-- include: ../openshift-master/additional_config.yml
-
-- include: ../openshift-node/config.yml
-
-- include: ../openshift-glusterfs/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- include: openshift_hosted.yml
-
-- include: openshift_metrics.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- include: openshift_logging.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- include: service_catalog.yml
- when: openshift_enable_service_catalog | default(false) | bool
-
-- include: ../openshift-management/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
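Note: the include chain above depends on the oo_* groups that evaluate_groups.yml (below) populates earlier in the run, and each optional component is gated on its evaluated group being non-empty. A minimal sketch of the guard idiom used throughout this file:

---
- include: ../openshift-loadbalancer/config.yml
  # skipped entirely when no load balancer hosts were evaluated
  when: groups.oo_lb_to_config | default([]) | count > 0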
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
deleted file mode 100644
index ec6f2c52c..000000000
--- a/playbooks/common/openshift-cluster/create_persistent_volumes.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Create persistent volumes
- hosts: oo_first_master
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- tasks:
- - debug: var=persistent_volumes
- - debug: var=persistent_volume_claims
-
-- name: Create Hosted Resources - persistent volumes
- hosts: oo_first_master
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- roles:
- - role: openshift_persistent_volumes
- when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
deleted file mode 100644
index be14b06f0..000000000
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- include: evaluate_groups.yml
-
-- name: Load openshift_facts
- hosts: oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
- post_tasks:
- - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1"
- when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool
-
-- name: Reconfigure masters to listen on our new dns_port
- hosts: oo_masters_to_config
- handlers:
- - include: ../../../roles/openshift_master/handlers/main.yml
- static: yes
- vars:
- os_firewall_allow:
- - service: skydns tcp
- port: "{{ openshift.master.dns_port }}/tcp"
- - service: skydns udp
- port: "{{ openshift.master.dns_port }}/udp"
- roles:
- - os_firewall
- tasks:
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: master
- local_facts:
- dns_port: '8053'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: dnsConfig.bindAddress
- yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}"
- notify: restart master api
- - meta: flush_handlers
-
-- name: Configure nodes for dnsmasq
- hosts: oo_nodes_to_config
- handlers:
- - include: ../../../roles/openshift_node/handlers/main.yml
- static: yes
- pre_tasks:
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: node
- local_facts:
- dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
- roles:
- - openshift_node_dnsmasq
- post_tasks:
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
- yaml_key: dnsIP
- yaml_value: "{{ openshift.node.dns_ip }}"
- notify: restart node
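Note: modify_yaml is a module local to this repository; as used above it rewrites a single key in a config file and notifies a handler, and meta: flush_handlers forces the notified restart to run mid-play rather than at the end. A minimal sketch of the edit-then-restart pattern, assuming the default /etc/origin config base and the handler names included above:

---
- hosts: oo_masters_to_config
  handlers:
  - include: ../../../roles/openshift_master/handlers/main.yml
    static: yes
  tasks:
  - modify_yaml:
      dest: /etc/origin/master/master-config.yaml
      yaml_key: dnsConfig.bindAddress
      yaml_value: "0.0.0.0:8053"
    notify: restart master api
  # run the notified restart now instead of at the end of the play
  - meta: flush_handlers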
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
deleted file mode 100644
index 78b552279..000000000
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ /dev/null
@@ -1,190 +0,0 @@
----
-- name: Populate config host groups
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required
- fail:
- msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set
- when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined
-
- - name: Evaluate groups - g_master_hosts or g_new_master_hosts required
- fail:
- msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: g_master_hosts is not defined and g_new_master_hosts is not defined
-
- - name: Evaluate groups - g_node_hosts or g_new_node_hosts required
- fail:
- msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: g_node_hosts is not defined and g_new_node_hosts is not defined
-
- - name: Evaluate groups - g_lb_hosts required
- fail:
- msg: This playbook requires g_lb_hosts to be set
- when: g_lb_hosts is not defined
-
- - name: Evaluate groups - g_nfs_hosts required
- fail:
- msg: This playbook requires g_nfs_hosts to be set
- when: g_nfs_hosts is not defined
-
- - name: Evaluate groups - g_nfs_hosts is single host
- fail:
- msg: The nfs group must be limited to one host
- when: g_nfs_hosts | default([]) | length > 1
-
- - name: Evaluate groups - g_glusterfs_hosts required
- fail:
- msg: This playbook requires g_glusterfs_hosts to be set
- when: g_glusterfs_hosts is not defined
-
- - name: Evaluate groups - Fail if no etcd hosts group is defined
- fail:
- msg: >
- Running etcd as an embedded service is no longer supported. If this is a
- new install please define an 'etcd' group with either one or three
- hosts. These hosts may be the same hosts as your masters. If this is an
- upgrade you may set openshift_master_unsupported_embedded_etcd=true
- until a migration playbook becomes available.
- when:
- - g_etcd_hosts | default([]) | length not in [3,1]
- - not openshift_master_unsupported_embedded_etcd | default(False)
- - not (openshift_node_bootstrap | default(False))
-
- - name: Evaluate oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: oo_all_hosts
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_masters
- add_host:
- name: "{{ item }}"
- groups: oo_masters
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ g_master_hosts[0] }}"
- groups: oo_first_master
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- when: g_master_hosts|length > 0
- changed_when: no
-
- - name: Evaluate oo_new_etcd_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_new_etcd_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_new_etcd_hosts | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
- changed_when: no
-
- - name: Evaluate oo_etcd_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_etcd_hosts | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_first_etcd
- add_host:
- name: "{{ g_etcd_hosts[0] }}"
- groups: oo_first_etcd
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- when: g_etcd_hosts|length > 0
- changed_when: no
-
-  # We use two groups: one for the hosts we're upgrading, which doesn't include embedded
-  # etcd, and another for backups, which does include the embedded etcd host. There's no
-  # need to upgrade embedded etcd separately; that happens when the master is updated.
- - name: Evaluate oo_etcd_hosts_to_upgrade
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_hosts_to_upgrade
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
- changed_when: False
-
- - name: Evaluate oo_etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
- changed_when: False
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
- changed_when: no
-
-  # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is set.
- - name: Add master to oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_master_hosts | default([]) }}"
- when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
- changed_when: no
-
- - name: Evaluate oo_lb_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_lb_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_lb_hosts | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_nfs_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nfs_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_nfs_hosts | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_glusterfs_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_glusterfs_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
- changed_when: no
-
- - name: Evaluate oo_etcd_to_migrate
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_migrate
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
- changed_when: no
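Note: every group above is built the same way: add_host appends hosts one at a time to an in-memory group that later plays can target like any inventory group, and changed_when: no masks add_host's always-changed status. A minimal sketch of the pattern, using a hypothetical g_example_hosts variable:

---
- name: Populate an example in-memory group
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
  - add_host:
      name: "{{ item }}"
      groups: oo_example_to_config
    with_items: "{{ g_example_hosts | default([]) }}"
    changed_when: no

- name: Target the group built above
  hosts: oo_example_to_config
  tasks:
  - ping: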
diff --git a/playbooks/common/openshift-cluster/filter_plugins b/playbooks/common/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
deleted file mode 100644
index be2f8b5f4..000000000
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ /dev/null
@@ -1,156 +0,0 @@
----
-- name: Ensure that all non-node hosts are accessible
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config
- any_errors_fatal: true
- tasks:
-
-- name: Initialize host facts
- hosts: oo_all_hosts
- tasks:
- - name: load openshift_facts module
- include_role:
- name: openshift_facts
-
- # TODO: Should this role be refactored into health_checks??
- - name: Run openshift_sanitize_inventory to set variables
- include_role:
- name: openshift_sanitize_inventory
-
- - name: Detecting Operating System from ostree_booted
- stat:
- path: /run/ostree-booted
- register: ostree_booted
-
- # Locally setup containerized facts for now
- - name: initialize_facts set fact l_is_atomic
- set_fact:
- l_is_atomic: "{{ ostree_booted.stat.exists }}"
-
- - name: initialize_facts set fact for containerized and l_is_*_system_container
- set_fact:
- l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
- l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
- l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
- l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
- l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
-
- - name: initialize_facts set facts for l_any_system_container
- set_fact:
- l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
-
- - name: initialize_facts set fact for l_etcd_runtime
- set_fact:
- l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- - name: Validate python version - ans_dist is fedora and python is v3
- fail:
- msg: |
- openshift-ansible requires Python 3 for {{ ansible_distribution }};
- For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
- when:
- - ansible_distribution == 'Fedora'
- - ansible_python['version']['major'] != 3
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- - name: Validate python version - ans_dist not Fedora and python must be v2
- fail:
- msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
- when:
- - ansible_distribution != 'Fedora'
- - ansible_python['version']['major'] != 2
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- # Fail as early as possible if Atomic and old version of Docker
- - when:
- - l_is_atomic | bool
- block:
-
- # See https://access.redhat.com/articles/2317361
- # and https://github.com/ansible/ansible/issues/15892
- # NOTE: the double quotes cannot be removed at this level or the docker command will fail
- # NOTE: When ansible >2.2.1.x is used this can be updated per
- # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
- - name: Determine Atomic Host Docker Version
- shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
- register: l_atomic_docker_version
-
- - name: assert atomic host docker version is 1.12 or later
- assert:
- that:
- - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
- msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
-
- - when:
- - not l_is_atomic | bool
- block:
- - name: Ensure openshift-ansible installer package deps are installed
- package:
- name: "{{ item }}"
- state: present
- with_items:
- - iproute
- - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- - yum-utils
-
- - name: Ensure various deps for running system containers are installed
- package:
- name: "{{ item }}"
- state: present
- with_items:
- - atomic
- - ostree
- - runc
- when:
- - l_any_system_container | bool
-
- - name: Default system_images_registry to an enterprise registry
- set_fact:
- system_images_registry: "registry.access.redhat.com"
- when:
- - system_images_registry is not defined
- - openshift_deployment_type == "openshift-enterprise"
-
- - name: Default system_images_registry to the community registry
- set_fact:
- system_images_registry: "docker.io"
- when:
- - system_images_registry is not defined
- - openshift_deployment_type == "origin"
-
- - name: Gather Cluster facts and set is_containerized if needed
- openshift_facts:
- role: common
- local_facts:
- deployment_type: "{{ openshift_deployment_type }}"
- deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
- cli_image: "{{ osm_image | default(None) }}"
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- is_containerized: "{{ l_is_containerized | default(None) }}"
- is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
- is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
- is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
- is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
- etcd_runtime: "{{ l_etcd_runtime }}"
- system_images_registry: "{{ system_images_registry }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
- http_proxy: "{{ openshift_http_proxy | default(None) }}"
- https_proxy: "{{ openshift_https_proxy | default(None) }}"
- no_proxy: "{{ openshift_no_proxy | default(None) }}"
- generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
- no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
-
- - name: initialize_facts set_fact repoquery command
- set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
- - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
- set_fact:
- openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
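Note: the l_is_*_system_container facts above all encode one precedence rule: a component-specific variable wins, then the cluster-wide openshift_use_system_containers, then false. A short sketch of the nested default() chain in isolation:

---
- hosts: localhost
  gather_facts: no
  vars:
    openshift_use_system_containers: true
  tasks:
  - set_fact:
      # component override > cluster-wide setting > false
      l_is_node_system_container: "{{ openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool }}"
  - debug:
      var: l_is_node_system_container  # true here, via the cluster-wide setting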
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
deleted file mode 100644
index a7114fc80..000000000
--- a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Setup yum repositories for all hosts
- hosts: oo_all_hosts
- gather_facts: no
- tasks:
- - name: initialize openshift repos
- include_role:
- name: openshift_repos
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
deleted file mode 100644
index e6400ea61..000000000
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-# openshift_install_base_package_group may be set in a play variable to limit
-# the host groups the base package is installed on. This is currently used
-# for master/control-plane upgrades.
-- name: Set version_install_base_package true on masters and nodes
- hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}"
- tasks:
- - name: Set version_install_base_package true
- set_fact:
- version_install_base_package: True
- when: version_install_base_package is not defined
-
-# NOTE: requires openshift_facts be run
-- name: Determine openshift_version to configure on first master
- hosts: oo_first_master
- roles:
- - openshift_version
-
-# NOTE: We set this even on etcd hosts as they may also later run as masters,
-# and we don't want to install wrong version of docker and have to downgrade
-# later.
-- name: Set openshift_version for etcd, node, and master hosts
- hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
- vars:
- openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
- pre_tasks:
- - set_fact:
- openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
- - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
- roles:
- - openshift_version
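Note: the fan-out above works because facts set on one host are readable from any other play via hostvars: the openshift_version role computes the version once on the first master, and every other host reads that host's value so packages are pinned consistently. A minimal sketch of the mechanism, with the version hard-coded as a stand-in for the role:

---
- hosts: oo_first_master
  gather_facts: no
  tasks:
  - set_fact:
      # stand-in for the value the openshift_version role would compute
      openshift_version: "3.7.0"

- hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
  vars:
    openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
  tasks:
  - debug:
      msg: "pinning packages to -{{ openshift_version }}"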
diff --git a/playbooks/common/openshift-cluster/library b/playbooks/common/openshift-cluster/library
deleted file mode 120000
index d0b7393d3..000000000
--- a/playbooks/common/openshift-cluster/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/lookup_plugins b/playbooks/common/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
deleted file mode 100644
index 4b4f19690..000000000
--- a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Create Hosted Resources - openshift_default_storage_class
- hosts: oo_first_master
- roles:
- - role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
deleted file mode 100644
index c1536eb36..000000000
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: Hosted Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Hosted install 'In Progress'
- set_stats:
- data:
- installer_phase_hosted: "In Progress"
- aggregate: false
-
-- include: create_persistent_volumes.yml
-
-- include: openshift_default_storage_class.yml
-
-- include: openshift_hosted_create_projects.yml
-
-- include: openshift_hosted_router.yml
-
-- include: openshift_hosted_registry.yml
-
-- include: cockpit-ui.yml
-
-- include: openshift_prometheus.yml
- when: openshift_hosted_prometheus_deploy | default(False) | bool
-
-- name: Hosted Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Hosted install 'Complete'
- set_stats:
- data:
- installer_phase_hosted: "Complete"
- aggregate: false
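Note: the checkpoint plays bracket each phase with set_stats, which records values in the run's custom stats so the installer can report per-phase status when the run ends; aggregate: false keeps the latest value instead of merging it across calls. A minimal sketch of the bracket for a hypothetical phase:

---
- name: Example Install Checkpoint Start
  hosts: oo_all_hosts
  gather_facts: false
  tasks:
  - set_stats:
      data:
        installer_phase_example: "In Progress"
      aggregate: false

# ... the phase's real work runs here ...

- name: Example Install Checkpoint End
  hosts: oo_all_hosts
  gather_facts: false
  tasks:
  - set_stats:
      data:
        installer_phase_example: "Complete"
      aggregate: false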
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
deleted file mode 100644
index d5ca5185c..000000000
--- a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Create Hosted Resources - openshift projects
- hosts: oo_first_master
- tasks:
- - include_role:
- name: openshift_hosted
- tasks_from: create_projects.yml
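Note: include_role with tasks_from runs a single task file from a role without invoking the role's main entry point, which is how these hosted-resource plays reuse individual slices of openshift_hosted. A sketch of the mechanism:

---
- hosts: oo_first_master
  tasks:
  - include_role:
      name: openshift_hosted
      # runs only roles/openshift_hosted/tasks/create_projects.yml,
      # not the role's tasks/main.yml
      tasks_from: create_projects.yml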
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
deleted file mode 100644
index 2a91a827c..000000000
--- a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Create Hosted Resources - registry
- hosts: oo_first_master
- tasks:
- - set_fact:
- openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - include_role:
- name: openshift_hosted
- tasks_from: registry.yml
- when:
- - openshift_hosted_manage_registry | default(True) | bool
- - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_router.yml b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
deleted file mode 100644
index bcb5a34a4..000000000
--- a/playbooks/common/openshift-cluster/openshift_hosted_router.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Create Hosted Resources - router
- hosts: oo_first_master
- tasks:
- - set_fact:
- openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - include_role:
- name: openshift_hosted
- tasks_from: router.yml
- when:
- - openshift_hosted_manage_router | default(True) | bool
- - openshift_hosted_router_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
deleted file mode 100644
index 529a4c939..000000000
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- name: Logging Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Logging install 'In Progress'
- set_stats:
- data:
- installer_phase_logging: "In Progress"
- aggregate: false
-
-- name: OpenShift Aggregated Logging
- hosts: oo_first_master
- roles:
- - openshift_logging
-
-- name: Update Master configs
- hosts: oo_masters:!oo_first_master
- tasks:
- - block:
- - include_role:
- name: openshift_logging
- tasks_from: update_master_config
-
-- name: Logging Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Logging install 'Complete'
- set_stats:
- data:
- installer_phase_logging: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
deleted file mode 100644
index 9c0bd489b..000000000
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: Metrics Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Metrics install 'In Progress'
- set_stats:
- data:
- installer_phase_metrics: "In Progress"
- aggregate: false
-
-- name: OpenShift Metrics
- hosts: oo_first_master
- roles:
- - role: openshift_metrics
-
-- name: OpenShift Metrics
- hosts: oo_masters:!oo_first_master
- serial: 1
- tasks:
- - name: Setup the non-first masters configs
- include_role:
- name: openshift_metrics
- tasks_from: update_master_config.yaml
-
-- name: Metrics Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Metrics install 'Complete'
- set_stats:
- data:
- installer_phase_metrics: "Complete"
- aggregate: false
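Note: serial: 1 turns the second play above into a rolling update: each non-first master finishes its config update before the next host starts, so the control plane is never reconfigured everywhere at once. A sketch of the idiom:

---
- name: Update remaining masters one at a time
  hosts: oo_masters:!oo_first_master
  # finish all tasks on one host before starting the next
  serial: 1
  tasks:
  - include_role:
      name: openshift_metrics
      tasks_from: update_master_config.yaml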
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
deleted file mode 100644
index ac2d250a3..000000000
--- a/playbooks/common/openshift-cluster/openshift_prometheus.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- name: Create Hosted Resources - openshift_prometheus
- hosts: oo_first_master
- roles:
- - role: openshift_prometheus
diff --git a/playbooks/common/openshift-cluster/openshift_provisioners.yml b/playbooks/common/openshift-cluster/openshift_provisioners.yml
deleted file mode 100644
index b1ca6f606..000000000
--- a/playbooks/common/openshift-cluster/openshift_provisioners.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- name: OpenShift Provisioners
- hosts: oo_first_master
- roles:
- - openshift_provisioners
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
deleted file mode 100644
index 4a9fbf7eb..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Check cert expiries
- hosts: "{{ g_check_expiry_hosts }}"
- vars:
- openshift_certificate_expiry_show_all: yes
- roles:
- # Sets 'check_results' per host which contains health status for
- # etcd, master and node certificates. We will use 'check_results'
- # to determine if any certificates were expired prior to running
- # this playbook. Service restarts will be skipped if any
- # certificates were previously expired.
- - role: openshift_certificate_expiry
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
deleted file mode 100644
index d738c8207..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Backup and remove generated etcd certificates
- hosts: oo_first_etcd
- any_errors_fatal: true
- tasks:
- - include_role:
- name: etcd
- tasks_from: backup_generated_certificates
- - include_role:
- name: etcd
- tasks_from: remove_generated_certificates
-
-- name: Backup deployed etcd certificates
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- tasks:
- - include_role:
- name: etcd
- tasks_from: backup_server_certificates
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
deleted file mode 100644
index 044875d1c..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
+++ /dev/null
@@ -1,103 +0,0 @@
----
-- name: Check cert expiries
- hosts: oo_etcd_to_config:oo_masters_to_config
- vars:
- openshift_certificate_expiry_show_all: yes
- roles:
- # Sets 'check_results' per host which contains health status for
- # etcd, master and node certificates. We will use 'check_results'
- # to determine if any certificates were expired prior to running
- # this playbook. Service restarts will be skipped if any
- # certificates were previously expired.
- - role: openshift_certificate_expiry
-
-- name: Backup existing etcd CA certificate directories
- hosts: oo_etcd_to_config
- tasks:
- - include_role:
- name: etcd
- tasks_from: backup_ca_certificates
- - include_role:
- name: etcd
- tasks_from: remove_ca_certificates
-
-- include: ../../openshift-etcd/ca.yml
-
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_etcd_mktemp
- changed_when: false
-
-- name: Distribute etcd CA to etcd hosts
- hosts: oo_etcd_to_config
- tasks:
- - include_role:
- name: etcd
- tasks_from: distribute_ca
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-
-- include: ../../openshift-etcd/restart.yml
- # Do not restart etcd when etcd certificates were previously expired.
- when: ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
-
-- name: Retrieve etcd CA certificate
- hosts: oo_first_etcd
- tasks:
- - include_role:
- name: etcd
- tasks_from: retrieve_ca_certificates
- vars:
- etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
-
-- name: Distribute etcd CA to masters
- hosts: oo_masters_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Deploy etcd CA
- copy:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
- dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
- when: groups.oo_etcd_to_config | default([]) | length > 0
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file:
- name: "{{ g_etcd_mktemp.stdout }}"
- state: absent
- changed_when: false
-
-- include: ../../openshift-master/restart.yml
- # Do not restart masters when master or etcd certificates were previously expired.
- when:
- # masters
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
- # etcd
- - ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
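Note: the restart guards above chain two filter plugins shipped in this repository: oo_select_keys picks out the hostvars of the named hosts, and oo_collect extracts one attribute from each (optionally filtered on other attribute values), so the whole expression answers "was any certificate already expired?". A sketch of the etcd guard on its own, assuming the check_results structure produced by openshift_certificate_expiry:

---
- hosts: localhost
  gather_facts: no
  tasks:
  - debug:
      msg: "no etcd certificate was previously expired; restart is safe"
    when: ('expired' not in (hostvars
           | oo_select_keys(groups['etcd'])
           | oo_collect('check_results.check_results.etcd')
           | oo_collect('health')))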
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins
deleted file mode 120000
index b1213dedb..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/library b/playbooks/common/openshift-cluster/redeploy-certificates/library
deleted file mode 120000
index 9a53f009d..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins
deleted file mode 120000
index aff753026..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
deleted file mode 100644
index 4dbc041b0..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- name: Backup and remove master certificates
- hosts: oo_masters_to_config
- any_errors_fatal: true
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
- pre_tasks:
- - stat:
- path: "{{ openshift.common.config_base }}/generated-configs"
- register: openshift_generated_configs_dir_stat
- - name: Backup generated certificate and config directories
- command: >
- tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz
- {{ openshift.common.config_base }}/generated-configs
- {{ openshift.common.config_base }}/master
- when: openshift_generated_configs_dir_stat.stat.exists
- delegate_to: "{{ openshift_ca_host }}"
- run_once: true
- - name: Remove generated certificate directories
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - "{{ openshift.common.config_base }}/generated-configs"
- - name: Remove generated certificates
- file:
- path: "{{ openshift.common.config_base }}/master/{{ item }}"
- state: absent
- with_items:
- - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}"
- - "etcd.server.crt"
- - "etcd.server.key"
- - "master.server.crt"
- - "master.server.key"
- - "openshift-master.crt"
- - "openshift-master.key"
- - "openshift-master.kubeconfig"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
deleted file mode 100644
index 2ad84b3b9..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Ensure node directory is absent from generated configs
- hosts: oo_first_master
- tasks:
- # The generated configs directory (/etc/origin/generated-configs) is
- # backed up during redeployment of the control plane certificates.
- # We need to ensure that the generated config directory for
- # individual nodes has been deleted before continuing, so verify
- # that it is missing here.
- - name: Ensure node directories and tarballs are absent from generated configs
- shell: >
- rm -rf {{ openshift.common.config_base }}/generated-configs/node-*
- args:
- warn: no
-
-- name: Redeploy node certificates
- hosts: oo_nodes_to_config
- pre_tasks:
- - name: Remove CA certificate
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - "{{ openshift.common.config_base }}/node/ca.crt"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
deleted file mode 100644
index 2068ed199..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ /dev/null
@@ -1,303 +0,0 @@
----
-- name: Verify OpenShift version is greater than or equal to 1.2 or 3.2
- hosts: oo_first_master
- tasks:
- - fail:
- msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles."
- when: not openshift.common.version_gte_3_2_or_1_2 | bool
-
-- name: Check cert expiries
- hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config
- vars:
- openshift_certificate_expiry_show_all: yes
- roles:
- # Sets 'check_results' per host which contains health status for
- # etcd, master and node certificates. We will use 'check_results'
- # to determine if any certificates were expired prior to running
- # this playbook. Service restarts will be skipped if any
- # certificates were previously expired.
- - role: openshift_certificate_expiry
-
-# Update master config when ca-bundle not referenced. Services will be
-# restarted below after new CA certificate has been distributed.
-- name: Ensure ca-bundle.crt is referenced in master configuration
- hosts: oo_masters_to_config
- tasks:
- - slurp:
- src: "{{ openshift.common.config_base }}/master/master-config.yaml"
- register: g_master_config_output
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: kubeletClientInfo.ca
- yaml_value: ca-bundle.crt
- when: (g_master_config_output.content|b64decode|from_yaml).kubeletClientInfo.ca != 'ca-bundle.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: serviceAccountConfig.masterCA
- yaml_value: ca-bundle.crt
- when: (g_master_config_output.content|b64decode|from_yaml).serviceAccountConfig.masterCA != 'ca-bundle.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: oauthConfig.masterCA
- yaml_value: ca-bundle.crt
- when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: servingInfo.clientCA
- yaml_value: ca.crt
- when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: etcdClientInfo.ca
- yaml_value: ca-bundle.crt
- when:
- - groups.oo_etcd_to_config | default([]) | length == 0
- - (g_master_config_output.content|b64decode|from_yaml).etcdClientInfo.ca != 'ca-bundle.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: etcdConfig.peerServingInfo.clientCA
- yaml_value: ca-bundle.crt
- when:
- - groups.oo_etcd_to_config | default([]) | length == 0
- - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.peerServingInfo.clientCA != 'ca-bundle.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: etcdConfig.servingInfo.clientCA
- yaml_value: ca-bundle.crt
- when:
- - groups.oo_etcd_to_config | default([]) | length == 0
- - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
-
-- name: Copy current OpenShift CA to legacy directory
- hosts: oo_masters_to_config
- pre_tasks:
- - name: Create legacy-ca directory
- file:
- path: "{{ openshift.common.config_base }}/master/legacy-ca"
- state: directory
- mode: 0700
- owner: root
- group: root
- - command: mktemp -u XXXXXX
- register: g_legacy_ca_mktemp
- changed_when: false
- # Copy CA certificate, key, serial and bundle to legacy-ca with a
- # prefix generated by mktemp, e.g. XXXXXX-ca.crt.
- #
- # The following roles will pick up all CA certificates matching
- # /.*-ca.crt/ in the legacy-ca directory and ensure they are present
- # in the OpenShift CA bundle.
- # - openshift_ca
- # - openshift_master_certificates
- # - openshift_node_certificates
- - name: Copy current OpenShift CA to legacy directory
- copy:
- src: "{{ openshift.common.config_base }}/master/{{ item }}"
- dest: "{{ openshift.common.config_base }}/master/legacy-ca/{{ g_legacy_ca_mktemp.stdout }}-{{ item }}"
- remote_src: true
- # It is possible that a previous redeploy failed and files may be
- # missing. Ignore errors in this case; the files should have been
- # copied to the legacy-ca directory in a previous run.
- ignore_errors: true
- with_items:
- - "ca.crt"
- - "ca.key"
- - "ca.serial.txt"
- - "ca-bundle.crt"
-
-- name: Create temporary directory for creating new CA certificate
- hosts: oo_first_master
- tasks:
- - name: Create temporary directory for creating new CA certificate
- command: >
- mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_new_openshift_ca_mktemp
- changed_when: false
-
-- name: Create OpenShift CA
- hosts: oo_first_master
- vars:
- # Set openshift_ca_config_dir to a temporary directory where the new
- # CA will be created. We'll replace the existing CA with the CA
- # created in the temporary directory.
- openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}"
- roles:
- - role: openshift_master_facts
- - role: openshift_named_certificates
- - role: openshift_ca
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
-
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_master_mktemp
- changed_when: false
-
-- name: Retrieve OpenShift CA
- hosts: oo_first_master
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Retrieve CA certificate, key, bundle and serial
- fetch:
- src: "{{ hostvars[openshift_ca_host].g_new_openshift_ca_mktemp.stdout }}/{{ item }}"
- dest: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items:
- - ca.crt
- - ca.key
- - ca-bundle.crt
- - ca.serial.txt
- delegate_to: "{{ openshift_ca_host }}"
- run_once: true
- changed_when: false
-
-- name: Distribute OpenShift CA to masters
- hosts: oo_masters_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Deploy CA certificate, key, bundle and serial
- copy:
- src: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/{{ item }}"
- dest: "{{ openshift.common.config_base }}/master/"
- with_items:
- - ca.crt
- - ca.key
- - ca-bundle.crt
- - ca.serial.txt
- - name: Update master client kubeconfig CA data
- kubeclient_ca:
- client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig"
- ca_path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- - name: Update admin client kubeconfig CA data
- kubeclient_ca:
- client_path: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
- ca_path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- - name: Lookup default group for ansible_ssh_user
- command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}"
- changed_when: false
- register: _ansible_ssh_user_gid
- - set_fact:
- client_users: "{{ [ansible_ssh_user, 'root'] | unique }}"
- - name: Create the client config dir(s)
- file:
- path: "~{{ item }}/.kube"
- state: directory
- mode: 0700
- owner: "{{ item }}"
- group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
- with_items: "{{ client_users }}"
- - name: Copy the admin client config(s)
- copy:
- src: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
- dest: "~{{ item }}/.kube/config"
- remote_src: yes
- with_items: "{{ client_users }}"
- - name: Update the permissions on the admin client config(s)
- file:
- path: "~{{ item }}/.kube/config"
- state: file
- mode: 0700
- owner: "{{ item }}"
- group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
- with_items: "{{ client_users }}"
-
-- include: ../../openshift-master/restart.yml
- # Do not restart masters when master or etcd certificates were previously expired.
- when:
- # masters
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
- # etcd
- - ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
-
-- name: Distribute OpenShift CA certificate to nodes
- hosts: oo_nodes_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - copy:
- src: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/ca-bundle.crt"
- dest: "{{ openshift.common.config_base }}/node/ca.crt"
- - name: Copy OpenShift CA to system CA trust
- copy:
- src: "{{ item.cert }}"
- dest: "/etc/pki/ca-trust/source/anchors/{{ item.id }}-{{ item.cert | basename }}"
- remote_src: yes
- with_items:
- - id: openshift
- cert: "{{ openshift.common.config_base }}/node/ca.crt"
- notify:
- - update ca trust
- - name: Update node client kubeconfig CA data
- kubeclient_ca:
- client_path: "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.kubeconfig"
- ca_path: "{{ openshift.common.config_base }}/node/ca.crt"
- handlers:
- # Normally this handler would restart docker after updating ca
- # trust. We'll do that when we restart nodes to avoid restarting
- # docker on all nodes in parallel.
- - name: update ca trust
- command: update-ca-trust
-
-- name: Delete temporary directory on CA host
- hosts: oo_first_master
- tasks:
- - file:
- path: "{{ g_new_openshift_ca_mktemp.stdout }}"
- state: absent
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file:
- name: "{{ g_master_mktemp.stdout }}"
- state: absent
- changed_when: false
-
-- include: ../../openshift-node/restart.yml
- # Do not restart nodes when node, master or etcd certificates were previously expired.
- when:
- # nodes
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
- # masters
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
- # etcd
- - ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
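The master/node restart gating above is worth calling out: it reads the check_results fact left by the openshift_certificate_expiry role and blocks restarts if any certificate was already expired. A minimal sketch of the same pattern, assuming the repo's oo_select_keys/oo_collect filter plugins are on the filter path and the expiry role has already run:

- name: Gate restarts on prior certificate expiry (sketch)
  hosts: localhost
  gather_facts: no
  tasks:
    - set_fact:
        # Gather the recorded 'health' value for every master certificate;
        # any 'expired' entry means restarts must be skipped.
        master_cert_health: "{{ hostvars
                                | oo_select_keys(groups['oo_masters_to_config'])
                                | oo_collect('check_results.check_results.ocp_certs')
                                | oo_collect('health') }}"
    - debug:
        msg: Masters are safe to restart
      when: "'expired' not in master_cert_health"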
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
deleted file mode 100644
index afd5463b2..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ /dev/null
@@ -1,102 +0,0 @@
----
-- name: Update registry certificates
- hosts: oo_first_master
- vars:
- roles:
- - lib_openshift
- tasks:
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: false
-
- - name: Copy admin client config(s)
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: false
-
- - name: Determine if docker-registry exists
- command: >
- {{ openshift.common.client_binary }} get dc/docker-registry -o json
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n default
- register: l_docker_registry_dc
- failed_when: false
- changed_when: false
-
- - set_fact:
- docker_registry_env_vars: "{{ ((l_docker_registry_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env']
- | oo_collect('name'))
- | default([]) }}"
- docker_registry_secrets: "{{ ((l_docker_registry_dc.stdout | from_json)['spec']['template']['spec']['volumes']
- | oo_collect('secret')
- | oo_collect('secretName'))
- | default([]) }}"
- changed_when: false
- when: l_docker_registry_dc.rc == 0
-
- # Replace dc/docker-registry environment variable certificate data if set.
- - name: Update docker-registry environment variables
- shell: >
- {{ openshift.common.client_binary }} env dc/docker-registry
- OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
- OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-registry.crt)"
- OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-registry.key)"
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n default
- when: l_docker_registry_dc.rc == 0 and 'OPENSHIFT_CA_DATA' in docker_registry_env_vars and 'OPENSHIFT_CERT_DATA' in docker_registry_env_vars and 'OPENSHIFT_KEY_DATA' in docker_registry_env_vars
-
- # Replace dc/docker-registry certificate secret contents if set.
- - block:
- - name: Retrieve registry service IP
- oc_service:
- namespace: default
- name: docker-registry
- state: list
- register: docker_registry_service_ip
- changed_when: false
-
- - set_fact:
- docker_registry_route_hostname: "{{ 'docker-registry-default.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true)) }}"
- changed_when: false
-
- - name: Generate registry certificate
- command: >
- {{ openshift.common.client_binary }} adm ca create-server-cert
- --signer-cert={{ openshift.common.config_base }}/master/ca.crt
- --signer-key={{ openshift.common.config_base }}/master/ca.key
- --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
- --config={{ mktemp.stdout }}/admin.kubeconfig
- --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
- --cert={{ openshift.common.config_base }}/master/registry.crt
- --key={{ openshift.common.config_base }}/master/registry.key
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
- --expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }}
- {% endif %}
-
- - name: Update registry certificates secret
- oc_secret:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- name: registry-certificates
- namespace: default
- state: present
- files:
- - name: registry.crt
- path: "{{ openshift.common.config_base }}/master/registry.crt"
- - name: registry.key
- path: "{{ openshift.common.config_base }}/master/registry.key"
- run_once: true
- when: l_docker_registry_dc.rc == 0 and 'registry-certificates' in docker_registry_secrets and 'REGISTRY_HTTP_TLS_CERTIFICATE' in docker_registry_env_vars and 'REGISTRY_HTTP_TLS_KEY' in docker_registry_env_vars
-
- - name: Redeploy docker registry
- command: >
- {{ openshift.common.client_binary }} deploy dc/docker-registry
- --latest
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n default
-
- - name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
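The registry certificate regeneration above wraps `oc adm ca create-server-cert` with the cluster's signer material. The same call in isolation, as a rough sketch; the hostnames here are hypothetical placeholders:

- name: Regenerate a CA-signed server certificate (sketch)
  hosts: oo_first_master
  tasks:
    - name: Create server certificate signed by the cluster CA
      command: >
        oc adm ca create-server-cert
        --signer-cert=/etc/origin/master/ca.crt
        --signer-key=/etc/origin/master/ca.key
        --signer-serial=/etc/origin/master/ca.serial.txt
        --hostnames="registry.example.com,172.30.0.1"
        --cert=/etc/origin/master/registry.crt
        --key=/etc/origin/master/registry.key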
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/roles b/playbooks/common/openshift-cluster/redeploy-certificates/roles
deleted file mode 120000
index 4bdbcbad3..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
deleted file mode 100644
index 2116c745c..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ /dev/null
@@ -1,141 +0,0 @@
----
-- name: Update router certificates
- hosts: oo_first_master
- vars:
- roles:
- - lib_openshift
- tasks:
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: router_cert_redeploy_tempdir
- changed_when: false
-
- - name: Copy admin client config(s)
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- changed_when: false
-
- - name: Determine if router exists
- command: >
- {{ openshift.common.client_binary }} get dc/router -o json
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
- register: l_router_dc
- failed_when: false
- changed_when: false
-
- - name: Determine if router service exists
- command: >
- {{ openshift.common.client_binary }} get svc/router -o json
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
- register: l_router_svc
- failed_when: false
- changed_when: false
-
- - name: Collect router environment variables and secrets
- set_fact:
- router_env_vars: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env']
- | oo_collect('name'))
- | default([]) }}"
- router_secrets: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['volumes']
- | oo_collect('secret')
- | oo_collect('secretName'))
- | default([]) }}"
- changed_when: false
- when: l_router_dc.rc == 0
-
- - name: Collect router service annotations
- set_fact:
- router_service_annotations: "{{ (l_router_svc.stdout | from_json)['metadata']['annotations'] if 'annotations' in (l_router_svc.stdout | from_json)['metadata'] else [] }}"
- when: l_router_svc.rc == 0
-
- - name: Update router environment variables
- shell: >
- {{ openshift.common.client_binary }} env dc/router
- OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
- OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)"
- OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)"
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
- when:
- - l_router_dc.rc == 0
- - ('OPENSHIFT_CA_DATA' in router_env_vars)
- - ('OPENSHIFT_CERT_DATA' in router_env_vars)
- - ('OPENSHIFT_KEY_DATA' in router_env_vars)
-
- # When the router service contains service signer annotations we
- # will delete the existing certificate secret and allow OpenShift to
- # replace the secret.
- - block:
- - name: Delete existing router certificate secret
- oc_secret:
- kubeconfig: "{{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig"
- name: router-certs
- namespace: default
- state: absent
- run_once: true
-
- - name: Remove router service annotations
- command: >
- {{ openshift.common.client_binary }} annotate service/router
- service.alpha.openshift.io/serving-cert-secret-name-
- service.alpha.openshift.io/serving-cert-signed-by-
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
-
- - name: Add serving-cert-secret annotation to router service
- command: >
- {{ openshift.common.client_binary }} annotate service/router
- service.alpha.openshift.io/serving-cert-secret-name=router-certs
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
- when:
- - l_router_dc.rc == 0
- - l_router_svc.rc == 0
- - ('router-certs' in router_secrets)
- - openshift_hosted_router_certificate is undefined
- - ('service.alpha.openshift.io/serving-cert-secret-name') in router_service_annotations
- - ('service.alpha.openshift.io/serving-cert-signed-by') in router_service_annotations
-
- # When there are no annotations on the router service we will allow
- # the openshift_hosted role to either create a new wildcard
- # certificate (since we deleted the original) or reapply a custom
- # openshift_hosted_router_certificate.
- - file:
- path: "{{ item }}"
- state: absent
- with_items:
- - /etc/origin/master/openshift-router.crt
- - /etc/origin/master/openshift-router.key
- when:
- - l_router_dc.rc == 0
- - l_router_svc.rc == 0
- - ('router-certs' in router_secrets)
- - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations
- - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations
-
- - include_role:
- name: openshift_hosted
- tasks_from: main
- vars:
- openshift_hosted_manage_registry: false
- when:
- - l_router_dc.rc == 0
- - l_router_svc.rc == 0
- - ('router-certs' in router_secrets)
- - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations
- - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations
-
- - name: Redeploy router
- command: >
- {{ openshift.common.client_binary }} deploy dc/router
- --latest
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
-
- - name: Delete temp directory
- file:
- name: "{{ router_cert_redeploy_tempdir.stdout }}"
- state: absent
- changed_when: False
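When the router service carries the serving-cert annotations, the playbook deletes the router-certs secret and re-annotates the service so OpenShift's service-signing controller regenerates it. A stripped-down sketch of that handoff:

- name: Let the service signer re-issue router certs (sketch)
  hosts: oo_first_master
  tasks:
    - name: Request a signed secret for the router service
      command: >
        oc annotate service/router
        service.alpha.openshift.io/serving-cert-secret-name=router-certs
        -n default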
diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml
deleted file mode 100644
index 26716a92d..000000000
--- a/playbooks/common/openshift-cluster/sanity_checks.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-- name: Verify Requirements
- hosts: oo_all_hosts
- tasks:
- - fail:
- msg: Flannel cannot be used with OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use Flannel
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
-
- - fail:
- msg: Nuage SDN cannot be used with OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use Nuage
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Nuage SDN cannot be used with Flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Contiv cannot be used with OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use Contiv
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Contiv cannot be used with Flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Contiv cannot be used with Nuage
- when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Calico cannot be used with OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use Calico
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
-
- - fail:
- msg: The Calico playbook does not yet integrate with the Flannel playbook in OpenShift. Set either openshift_use_calico or openshift_use_flannel, but not both.
- when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
-
- - fail:
- msg: Calico cannot be used with Nuage in OpenShift. Set either openshift_use_calico or openshift_use_nuage, but not both.
- when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Calico cannot be used with Contiv in OpenShift. Set either openshift_use_calico or openshift_use_contiv, but not both.
- when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: openshift_hostname must be 63 characters or less
- when: openshift_hostname is defined and openshift_hostname | length > 63
-
- - fail:
- msg: openshift_public_hostname must be 63 characters or less
- when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
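The pairwise fail tasks above enumerate every conflicting SDN combination. The same exclusivity among the alternative plugins could also be expressed as a single assert; a sketch assuming the same openshift_use_* inventory variables:

- name: Verify at most one alternative SDN plugin is enabled (sketch)
  hosts: oo_all_hosts
  tasks:
    - assert:
        # Build the list of enabled alternatives and require at most one.
        that: >-
          [openshift_use_flannel | default(false) | bool,
           openshift_use_nuage | default(false) | bool,
           openshift_use_contiv | default(false) | bool,
           openshift_use_calico | default(false) | bool]
          | select | list | length <= 1
        msg: Enable at most one of flannel, nuage, contiv, or calico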
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
deleted file mode 100644
index bd964b2ce..000000000
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: Service Catalog Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Service Catalog install 'In Progress'
- set_stats:
- data:
- installer_phase_servicecatalog: "In Progress"
- aggregate: false
-
-- name: Service Catalog
- hosts: oo_first_master
- roles:
- - openshift_service_catalog
- - ansible_service_broker
- - template_service_broker
- vars:
- first_master: "{{ groups.oo_first_master[0] }}"
-
-- name: Service Catalog Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Service Catalog install 'Complete'
- set_stats:
- data:
- installer_phase_servicecatalog: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
deleted file mode 100644
index 45b34c8bd..000000000
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Initialization Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- roles:
- - installer_checkpoint
- tasks:
- - name: Set install initialization 'In Progress'
- set_stats:
- data:
- installer_phase_initialize: "In Progress"
- aggregate: false
-
-- include: evaluate_groups.yml
- tags:
- - always
-
-- include: initialize_facts.yml
- tags:
- - always
-
-- include: sanity_checks.yml
- tags:
- - always
-
-- include: validate_hostnames.yml
- tags:
- - node
-
-- include: initialize_openshift_repos.yml
- tags:
- - always
-
-- include: initialize_openshift_version.yml
- tags:
- - always
-
-- name: Initialization Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set install initialization 'Complete'
- set_stats:
- data:
- installer_phase_initialize: "Complete"
- aggregate: false
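The checkpoint plays bracket each installer phase with set_stats so the installer_checkpoint callback can report phase status at the end of the run. The bracketing pattern in isolation, with a hypothetical phase name:

- name: Example Phase Checkpoint Start (sketch)
  hosts: oo_all_hosts
  gather_facts: false
  tasks:
    - name: Set example phase 'In Progress'
      set_stats:
        data:
          installer_phase_example: "In Progress"
        aggregate: false

# ... the phase's real work would run here ...

- name: Example Phase Checkpoint End (sketch)
  hosts: oo_all_hosts
  gather_facts: false
  tasks:
    - name: Set example phase 'Complete'
      set_stats:
        data:
          installer_phase_example: "Complete"
        aggregate: false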
diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
deleted file mode 100644
index eb118365a..000000000
--- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- set_fact: k8s_type="etcd"
-
-- name: Generate etcd instance name(s)
- set_fact:
- scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
- register: etcd_names_output
- with_sequence: count={{ num_etcd }}
-
-- set_fact:
- etcd_names: "{{ etcd_names_output.results | default([])
- | oo_collect('ansible_facts')
- | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
deleted file mode 100644
index 783f70f50..000000000
--- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- set_fact: k8s_type="master"
-
-- name: Generate master instance name(s)
- set_fact:
- scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
- register: master_names_output
- with_sequence: count={{ num_masters }}
-
-- set_fact:
- master_names: "{{ master_names_output.results | default([])
- | oo_collect('ansible_facts')
- | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
deleted file mode 100644
index c103e40a9..000000000
--- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- set_fact: k8s_type=node
-- set_fact: sub_host_type="{{ type }}"
-- set_fact: number_nodes="{{ count }}"
-
-- name: Generate node instance name(s)
- set_fact:
- scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
- register: node_names_output
- with_sequence: count={{ number_nodes }}
-
-- set_fact:
- node_names: "{{ node_names_output.results | default([])
- | oo_collect('ansible_facts')
- | oo_collect('scratch_name') }}"
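All three launch-facts files share the same naming trick: loop with with_sequence and build a random five-hex-digit suffix per item. A self-contained sketch using stock Jinja instead of the repo's oo_collect filter:

- name: Generate scratch instance names (sketch)
  hosts: localhost
  gather_facts: no
  tasks:
    - set_fact:
        # '%05x' | format(1048576 | random) yields a zero-padded hex
        # suffix such as 0f3a2; each loop iteration re-rolls the random.
        scratch_name: "default-node-{{ '%05x' | format(1048576 | random) }}"
      register: node_names_output
      with_sequence: count=3
    - debug:
        msg: "{{ node_names_output.results | map(attribute='ansible_facts.scratch_name') | list }}"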
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
index 6e953be69..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -1,22 +1 @@
---
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Remove unused Docker images for Docker 1.10+ migration
- shell: "docker rmi `docker images -aq`"
- # Will fail on images still in use:
- failed_when: false
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
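The removed tasks amounted to counting images, force-removing the unreferenced ones, and counting again. The core of that cleanup as a hedged sketch, guarded by the same docker_upgrade_nuke_images switch:

- name: Remove unused Docker images (sketch)
  hosts: oo_nodes_to_upgrade
  tasks:
    - name: Count local images
      shell: "docker images -aq | wc -l"
      register: docker_image_count
      changed_when: false
    - name: Remove images not held by a container
      shell: "docker rmi $(docker images -aq)"
      # Images still in use will fail to delete; that is expected.
      failed_when: false
      when:
        - docker_upgrade_nuke_images | default(false) | bool
        - docker_image_count.stdout | int > 0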
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 23cf8cf76..372a39e74 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -22,7 +22,7 @@
- name: Create service signer certificate
command: >
- {{ openshift.common.client_binary }} adm ca create-signer-cert
+ {{ openshift_client_binary }} adm ca create-signer-cert
--cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt
--key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key
--name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
index 800621857..858912379 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
@@ -1,11 +1,10 @@
---
- name: Disable excluders
- hosts: oo_masters_to_config
+ hosts: "{{ l_upgrade_excluder_hosts }}"
gather_facts: no
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
deleted file mode 100644
index a66301c0d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Disable excluders
- hosts: oo_nodes_to_upgrade:!oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- r_openshift_excluder_verify_upgrade: true
- r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
- r_openshift_excluder_package_state: latest
- r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 98953f72e..ffb11670d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,25 +1,24 @@
---
-- include: ../../evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
roles:
- openshift_facts
tasks:
- - set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
- fail:
msg: Cannot upgrade Docker on Atomic operating systems.
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- - include: upgrade_check.yml
+ - import_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -32,6 +31,7 @@
any_errors_fatal: true
roles:
+ - openshift_facts
- lib_openshift
tasks:
@@ -43,7 +43,7 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
@@ -51,15 +51,15 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
- until: not l_docker_upgrade_drain_result | failed
+ until: not (l_docker_upgrade_drain_result is failed)
retries: 60
delay: 60
- - include: tasks/upgrade.yml
+ - include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
@@ -70,5 +70,5 @@
retries: 10
delay: 5
register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
+ until: node_schedulable is succeeded
+ when: node_unschedulable is changed
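Beyond the import_playbook conversions, this hunk migrates the deprecated `result | succeeded` filter syntax to the `result is succeeded` test syntax required by newer Ansible. The retry idiom in its new form, with a hypothetical node name:

- name: Mark a node schedulable again (sketch)
  hosts: oo_first_master
  tasks:
    - name: Set node schedulability
      command: >
        oc adm manage-node node1.example.com --schedulable=true
      register: node_schedulable
      until: node_schedulable is succeeded
      retries: 10
      delay: 5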
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
deleted file mode 100644
index 8635eab0d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Stop any running containers
-running_container_ids=`docker ps -q`
-if test -n "$running_container_ids"
-then
- docker stop $running_container_ids
-fi
-
-# Delete all containers
-container_ids=`docker ps -a -q`
-if test -n "$container_ids"
-then
- docker rm -f -v $container_ids
-fi
-
-# Delete all images (forcefully)
-image_ids=`docker images -aq`
-if test -n "$image_ids"
-then
- # Some layers are deleted recursively and are no longer present
- # when docker goes to remove them:
- docker rmi -f `docker images -aq` || true
-fi
-
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 83f16ac0d..3b47a11e0 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -2,24 +2,20 @@
- name: Restart docker
service: name=docker state=restarted
register: l_docker_restart_docker_in_upgrade_result
- until: not l_docker_restart_docker_in_upgrade_result | failed
+ until: not (l_docker_restart_docker_in_upgrade_result is failed)
retries: 3
delay: 30
-- name: Update docker facts
- openshift_facts:
- role: docker
-
- name: Restart containerized services
service: name={{ item }} state=started
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Wait for master API to come back online
wait_for:
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 808cc562c..54eeb2ef5 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,13 +4,13 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
- etcd_container
- openvswitch
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Check Docker image count
shell: "docker images -aq | wc -l"
@@ -35,12 +35,14 @@
name: docker
state: stopped
register: l_pb_docker_upgrade_stop_result
- until: not l_pb_docker_upgrade_stop_result | failed
+ until: not (l_pb_docker_upgrade_stop_result is failed)
retries: 3
delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
+ register: result
+ until: result is succeeded
-- include: restart.yml
+- include_tasks: restart.yml
when: not skip_docker_restart | default(False) | bool
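The new register/until pair on the package task retries transient package-manager failures instead of failing the upgrade outright. The idiom in isolation, with a hypothetical pinned version:

- name: Upgrade a package with retries (sketch)
  hosts: oo_nodes_to_upgrade
  tasks:
    - name: Upgrade Docker to a pinned version
      package:
        name: "docker-1.13.1"
        state: present
      register: result
      until: result is succeeded
      retries: 3
      delay: 10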
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 52345a9ba..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -1,58 +1 @@
---
-
-# This snippet determines whether a Docker upgrade is required by checking the
-# inventory variables and the available packages; if so, it sets l_docker_upgrade to True.
-
-- set_fact:
- docker_upgrade: True
- when: docker_upgrade is not defined
-
-- name: Check if Docker is installed
- command: rpm -q docker
- args:
- warn: no
- register: pkg_check
- failed_when: pkg_check.rc > 1
- changed_when: no
-
-- name: Get current version of Docker
- command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
- register: curr_docker_version
- retries: 4
- until: curr_docker_version | succeeded
- changed_when: false
-
-- name: Get latest available version of Docker
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "docker"
- register: avail_docker_version
- retries: 4
- until: avail_docker_version | succeeded
- # Don't expect docker rpm to be available on hosts that don't already have it installed:
- when: pkg_check.rc == 0
- failed_when: false
- changed_when: false
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- # Disable the 1.12 requirement if the user set a specific Docker version
- when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
-
-# Default l_docker_upgrade to False; we'll set it to True if an upgrade is required:
-- set_fact:
- l_docker_upgrade: False
-
-# Make sure a docker_version is set if none was requested:
-- set_fact:
- docker_version: "{{ avail_docker_version.stdout }}"
- when: pkg_check.rc == 0 and docker_version is not defined
-
-- name: Flag for Docker upgrade if necessary
- set_fact:
- l_docker_upgrade: True
- when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
-
-- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
- set_fact:
- docker_upgrade_nuke_images: True
- when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
deleted file mode 100644
index d086cad00..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: Backup etcd
- hosts: oo_etcd_hosts_to_backup
- roles:
- - role: openshift_etcd_facts
- post_tasks:
- - include_role:
- name: etcd
- tasks_from: backup
- vars:
- r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
-
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) | list }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when: etcd_backup_failed | length > 0
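The gate play is a reusable fan-in pattern: collect every host that set a completion fact, then fail on the set difference. The same pattern reduced to its essentials:

- name: Gate on a completion fact (sketch)
  hosts: localhost
  connection: local
  become: no
  tasks:
    - set_fact:
        backup_completed: "{{ hostvars
                              | oo_select_keys(groups.oo_etcd_hosts_to_backup)
                              | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
    - fail:
        msg: "etcd backup failed on: {{ groups.oo_etcd_hosts_to_backup | difference(backup_completed) | join(',') }}"
      when: groups.oo_etcd_hosts_to_backup | difference(backup_completed) | length > 0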
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
deleted file mode 100644
index 5b8ba3bb2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. The etcd docs say to
-# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedious
-# task on RHEL and CentOS, it's simply not possible on Fedora unless you've
-# mirrored the packages yourself, because only the GA and latest versions are
-# available in the repos. So for Fedora we'll simply skip this, sorry.
-
-- name: Backup etcd before upgrading anything
- include: backup.yml
- vars:
- etcd_backup_tag: "pre-upgrade-"
- when: openshift_etcd_backup | default(true) | bool
-
-- name: Drop etcdctl profiles
- hosts: oo_etcd_hosts_to_upgrade
- tasks:
- - include_role:
- name: etcd
- tasks_from: drop_etcdctl
-
-- name: Perform etcd upgrade
- include: ./upgrade.yml
- when: openshift_etcd_upgrade | default(true) | bool
-
-- name: Backup etcd
- include: backup.yml
- vars:
- etcd_backup_tag: "post-3.0-"
- when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
deleted file mode 100644
index d71c96cd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ /dev/null
@@ -1,108 +0,0 @@
----
-- name: Determine etcd version
- hosts: oo_etcd_hosts_to_upgrade
- tasks:
- - block:
- - name: Record RPM based etcd version
- command: rpm -qa --qf '%{version}' etcd\*
- args:
- warn: no
- register: etcd_rpm_version
- failed_when: false
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
- - debug:
- msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
- when:
- - not openshift.common.is_containerized | bool
-
- - block:
- - name: Record containerized etcd version (docker)
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version_docker
- failed_when: false
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
- when:
- - not openshift.common.is_etcd_system_container | bool
-
- # Since a registered variable is set even when the 'when' condition
- # is false, we need to set etcd_container_version separately
- - set_fact:
- etcd_container_version: "{{ etcd_container_version_docker.stdout }}"
- when:
- - not openshift.common.is_etcd_system_container | bool
-
- - name: Record containerized etcd version (runc)
- command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version_runc
- failed_when: false
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
- when:
- - openshift.common.is_etcd_system_container | bool
-
- # Since a registered variable is set even when the 'when' condition
- # is false, we need to set etcd_container_version separately
- - set_fact:
- etcd_container_version: "{{ etcd_container_version_runc.stdout }}"
- when:
- - openshift.common.is_etcd_system_container | bool
-
- - debug:
- msg: "Etcd containerized version {{ etcd_container_version }} detected"
- when:
- - openshift.common.is_containerized | bool
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '2.1'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '2.2'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '2.2.5'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '2.3'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '2.3.7'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '3.0'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '3.0.15'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '3.1'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '3.1.3'
-
-- name: Upgrade fedora to latest
- hosts: oo_etcd_hosts_to_upgrade
- serial: 1
- tasks:
- - include_role:
- name: etcd
- tasks_from: upgrade_image
- vars:
- r_etcd_common_etcd_runtime: "host"
- etcd_peer: "{{ openshift.common.hostname }}"
- when:
- - ansible_distribution == 'Fedora'
- - not openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
deleted file mode 100644
index e5e895775..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# INPUT etcd_upgrade_version
-# INPUT etcd_container_version
-# INPUT openshift.common.is_containerized
-- name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
- hosts: oo_etcd_hosts_to_upgrade
- serial: 1
- tasks:
- - include_role:
- name: etcd
- tasks_from: upgrade_image
- vars:
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- etcd_peer: "{{ openshift.common.hostname }}"
- when:
- - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
- - openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
deleted file mode 100644
index a2a26bad4..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-# INPUT etcd_upgrade_version
-# INPUT etcd_rpm_version
-# INPUT openshift.common.is_containerized
-- name: Upgrade to {{ etcd_upgrade_version }}
- hosts: oo_etcd_hosts_to_upgrade
- serial: 1
- tasks:
- - include_role:
- name: etcd
- tasks_from: upgrade_rpm
- vars:
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "host"
- etcd_peer: "{{ openshift.common.hostname }}"
- when:
- - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
- - ansible_distribution == 'RedHat'
- - not openshift.common.is_containerized | bool
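Each rung of the stepwise etcd upgrade fires only when the detected version is below that rung's target, so already-upgraded members are skipped. The version gate in isolation, with hypothetical values:

- name: Show the stepwise version gate (sketch)
  hosts: localhost
  gather_facts: no
  vars:
    detected_version: "2.1.9"
    etcd_upgrade_version: "2.2"
  tasks:
    - debug:
        msg: "Would upgrade this member to {{ etcd_upgrade_version }}"
      when: detected_version | default('99') | version_compare(etcd_upgrade_version, '<')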
diff --git a/playbooks/common/openshift-cluster/upgrades/filter_plugins b/playbooks/common/openshift-cluster/upgrades/filter_plugins
deleted file mode 120000
index b1213dedb..000000000
--- a/playbooks/common/openshift-cluster/upgrades/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 2826951e6..8ee83819e 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,15 +1,20 @@
---
-- include: ../evaluate_groups.yml
+- import_playbook: ../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_facts.yml
+- import_playbook: ../../../init/facts.yml
- name: Ensure firewall is not switched during upgrade
- hosts: oo_all_hosts
+ hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
+ vars:
+ openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
tasks:
+ - name: set currently installed version
+ set_fact:
+ openshift_currently_installed_version: "{{ openshift_master_installed_version }}"
- name: Check if iptables is running
command: systemctl status iptables
changed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
deleted file mode 120000
index aff753026..000000000
--- a/playbooks/common/openshift-cluster/upgrades/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 122066955..1b57521df 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -14,8 +14,9 @@
pre_tasks:
- name: Load lib_openshift modules
- include_role:
+ import_role:
name: lib_openshift
+
- name: Collect all routers
oc_obj:
state: list
@@ -26,8 +27,8 @@
- set_fact:
haproxy_routers: "{{ all_routers.results.results[0]['items'] |
- oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
- oo_select_keys_from_list(['metadata']) }}"
+ lib_utils_oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
+ lib_utils_oo_select_keys_from_list(['metadata']) }}"
when:
- all_routers.results.returncode == 0
@@ -85,17 +86,19 @@
roles:
- openshift_manageiq
+ - role: openshift_project_request_template
+ when: openshift_project_request_template_manage
# Create the new templates shipped in 3.2, existing templates are left
# unmodified. This prevents the subsequent role definition for
# openshift_examples from failing when trying to replace templates that do
# not already exist. We could have potentially done a replace --force to
# create and update in one step.
- role: openshift_examples
- when: openshift_install_examples | default(true,true) | bool
+ when: openshift_install_examples | default(true) | bool
- openshift_hosted_templates
# Update the existing templates
- role: openshift_examples
- when: openshift_install_examples | default(true,true) | bool
+ when: openshift_install_examples | default(true) | bool
registry_url: "{{ openshift.master.registry_url }}"
openshift_examples_import_command: replace
- role: openshift_hosted_templates
@@ -111,13 +114,11 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
register: grep_plugin_order_override
- when: openshift.common.version_gte_3_3_or_1_3 | bool
changed_when: false
failed_when: false
@@ -125,7 +126,7 @@
debug:
msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
when:
- - not grep_plugin_order_override | skipped
+ - not (grep_plugin_order_override is skipped)
- grep_plugin_order_override.rc == 0
- name: Warn if shared-resource-viewer could not be updated
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
new file mode 100644
index 000000000..cfc0c8745
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -0,0 +1,77 @@
+---
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+
+- import_playbook: verify_cluster.yml
+
+- name: Update repos on upgrade hosts
+ hosts: "{{ l_upgrade_repo_hosts }}"
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: "{{ l_upgrade_no_proxy_hosts }}"
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- name: OpenShift Health Checks
+ hosts: "{{ l_upgrade_health_check_hosts }}"
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: upgrade
+ post_tasks:
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - docker_image_availability
+
+- import_playbook: ../disable_excluders.yml
+
+- import_playbook: ../../../../init/version.yml
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+# If we're only upgrading nodes, we need to ensure masters are already upgraded
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when:
+ - l_upgrade_nodes_only | default(False) | bool
+ - openshift.common.version != openshift_version
+
+# If we're only upgrading nodes, skip this.
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+ when: not (l_upgrade_nodes_only | default(False)) | bool
+
+- name: Verify upgrade targets
+ hosts: "{{ l_upgrade_verify_targets_hosts }}"
+ roles:
+ - role: openshift_facts
+ tasks:
+ - include_tasks: verify_upgrade_targets.yml
+
+- name: Verify docker upgrade targets
+ hosts: "{{ l_upgrade_docker_target_hosts }}"
+ tasks:
+ - import_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
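The health-check play above drives the openshift_health_checker role's custom openshift_health_check action, which runs only the named checks. A trimmed sketch that exercises a subset:

- name: Run selected pre-upgrade health checks (sketch)
  hosts: oo_masters_to_config
  any_errors_fatal: true
  roles:
    - openshift_health_checker
  vars:
    - r_openshift_health_checker_playbook_context: upgrade
  post_tasks:
    - name: Check disk and memory only
      action: openshift_health_check
      args:
        checks:
          - disk_availability
          - memory_availability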
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
deleted file mode 100644
index 8ecae4539..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Flag pre-upgrade checks complete for hosts without errors
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - set_fact:
- pre_upgrade_complete: True
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
deleted file mode 100644
index 6d8503879..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Only check if docker upgrade is required if docker_upgrade is not
-# already set to False.
-- include: ../../docker/upgrade_check.yml
- when:
- - docker_upgrade is not defined or (docker_upgrade | bool)
- - not (openshift.common.is_atomic | bool)
-
-# Additional checks for Atomic hosts:
-
-- name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
-
-- set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
new file mode 100644
index 000000000..693ab2d96
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -0,0 +1,94 @@
+---
+# Verify a few items before we proceed with the upgrade process.
+
+- name: Verify upgrade can proceed on first master
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+
+ # Error out in situations where the user has older versions specified in their
+ # inventory in any of the openshift_release, openshift_image_tag, and
+ # openshift_pkg_version variables. These must be removed or updated to proceed
+ # with the upgrade.
+ # TODO: Should we block if you're *over* the next major release version as well?
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when:
+ - openshift_pkg_version is defined
+ - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when:
+ - openshift_image_tag is defined
+ - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
+
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ openshift_upgrade_target }} upgrade
+ when:
+ - openshift_release is defined
+ - not (openshift_release is version_compare(openshift_upgrade_target ,'='))
+
+- name: Verify master processes
+ hosts: oo_masters_to_config
+ roles:
+ - lib_utils
+ - openshift_facts
+ tasks:
+ - name: Read master storage backend setting
+ yedit:
+ state: list
+ src: /etc/origin/master/master-config.yaml
+ key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ register: _storage_backend
+
+ - fail:
+ msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+ when:
+    # assuming the master-config.yaml is properly configured, i.e. the value is a list
+ - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+ - debug:
+ msg: "Storage backend is set to etcd3"
+
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+ - when: openshift_is_containerized | bool
+ block:
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master"
+
+  # Handle the case of a non-HA to HA upgrade.
+ - name: Check if the {{ openshift_service_type }}-master-api.service exists
+ command: >
+ systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
+ register: master_api_service_status
+
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ when:
+ - master_api_service_status.stdout_lines | length > 0
+ - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items: "{{ master_services }}"
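
To make the version guards in the new verify_cluster.yml concrete, assume openshift_upgrade_target is 3.7. Illustrative (hypothetical) inventory values and their outcomes under the checks above:

openshift_pkg_version: "-3.6.0"  # fails: '3.6.0' is version_compare('3.7', '<')
openshift_image_tag: "v3.7.2"    # passes: '3.7.2' is not older than the target
openshift_release: "v3.7"        # set_fact strips the 'v'; must then equal 3.7
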
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
deleted file mode 100644
index 45022cd61..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Verify master processes
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
-
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
deleted file mode 100644
index f75ae3b15..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Verify all masters has etcd3 storage backend set
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - lib_utils
- tasks:
- - name: Read master storage backend setting
- yedit:
- state: list
- src: /etc/origin/master/master-config.yaml
- key: kubernetesMasterConfig.apiServerArguments.storage-backend
- register: _storage_backend
-
- - fail:
- msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
- when:
- # assuming the master-config.yml is properly configured, i.e. the value is a list
- - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
-
- - debug:
- msg: "Storage backend is set to etcd3"
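
Both this deleted standalone play and its replacement inside verify_cluster.yml read the same key via yedit; the master-config.yaml fragment they expect on a correctly configured master looks like this sketch:

kubernetesMasterConfig:
  apiServerArguments:
    storage-backend:
    - etcd3
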
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
deleted file mode 100644
index 2a8de50a2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: OpenShift Health Checks
- hosts: oo_all_hosts
- any_errors_fatal: true
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: upgrade
- post_tasks:
- - name: Run health checks (upgrade)
- action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - docker_image_availability
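
The openshift_health_check action used above accepts an arbitrary list of check names, so the same pattern can run a narrower probe. A minimal sketch built only from constructs in the deleted play:

- name: Run only the disk availability check (sketch)
  hosts: oo_all_hosts
  roles:
    - openshift_health_checker
  vars:
    r_openshift_health_checker_playbook_context: upgrade
  post_tasks:
    - action: openshift_health_check
      args:
        checks:
          - disk_availability
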
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
deleted file mode 100644
index 3c0017891..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify upgrade can proceed on first master
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - fail:
- msg: >
- This upgrade is only supported for origin and openshift-enterprise
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise']
-
- # Error out in situations where the user has older versions specified in their
- # inventory in any of the openshift_release, openshift_image_tag, and
- # openshift_pkg_version variables. These must be removed or updated to proceed
- # with upgrade.
- # TODO: Should we block if you're *over* the next major release version as well?
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - fail:
- msg: >
- openshift_image_tag is {{ openshift_image_tag }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when: openshift_release is defined and openshift_release[0] == 'v'
-
- - fail:
- msg: >
- openshift_release is {{ openshift_release }} which is not a
- valid release for a {{ openshift_upgrade_target }} upgrade
- when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
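
Note the syntax being retired along with this file: the deleted checks used the pre-Ansible-2.5 filter spelling (| version_compare), while the replacements in verify_cluster.yml use the Jinja2 test spelling (is version_compare), the same conversion this diff applies to succeeded/failed/changed. Side by side:

# old filter syntax, deprecated since Ansible 2.5:
when: openshift_release | version_compare(openshift_upgrade_target, '=')
# new test syntax used by the replacement playbooks:
when: openshift_release is version_compare(openshift_upgrade_target, '=')
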
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 142ce5f3d..4c1156f4b 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -4,18 +4,24 @@
msg: Verify OpenShift is already installed
when: openshift.common.version is not defined
+- name: Update oreg_auth docker login credentials if necessary
+ import_role:
+ name: container_runtime
+ tasks_from: registry_auth.yml
+ when: oreg_auth_user is defined
+
- name: Verify containers are available for upgrade
command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
-- when: not openshift.common.is_containerized | bool
+- when: not openshift_is_containerized | bool
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift.common.service_type }}"
+ name: "{{ openshift_service_type }}"
ignore_excluders: true
register: repoquery_out
@@ -37,11 +43,11 @@
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<')
+ - (openshift_pkg_version | default('-0.0', True)).split('-')[1] is version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
fail:
msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
when:
- - deployment_type == 'origin'
- - openshift.common.version | version_compare(openshift_upgrade_min,'<')
+ - openshift_deployment_type == 'origin'
+ - openshift.common.version is version_compare(openshift_upgrade_min,'<')
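
The reworked package check splits openshift_pkg_version on '-' before comparing, so the leading-dash form that yum expects no longer defeats the comparison. Two hedged examples of what the expression yields (version strings illustrative):

- debug:
    msg: "{{ ('-3.7.23-1.el7').split('-')[1] }}"  # -> '3.7.23'
- debug:
    msg: "{{ ('-0.0').split('-')[1] }}"           # -> '0.0', the default used above
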
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
deleted file mode 100644
index 8cc46ab68..000000000
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-# When we update package "a-${version}" and a requires b >= ${version} if we
-# don't specify the version of b yum will choose the latest version of b
-# available and the whole set of dependencies end up at the latest version.
-# Since the package module, unlike the yum module, doesn't flatten a list
-# of packages into one transaction we need to do that explicitly. The ansible
-# core team tells us not to rely on yum module transaction flattening anyway.
-
-# TODO: If the sdn package isn't already installed this will install it, we
-# should fix that
-- name: Upgrade master packages
- package: name={{ master_pkgs | join(',') }} state=present
- vars:
- master_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - PyYAML
- when:
- - component == "master"
- - not openshift.common.is_atomic | bool
-
-- name: Upgrade node packages
- package: name={{ node_pkgs | join(',') }} state=present
- vars:
- node_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - PyYAML
- when:
- - component == "node"
- - not openshift.common.is_atomic | bool
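
The header comment of this deleted file is the part worth keeping in mind: joining all names into one string hands yum a single transaction in which the versioned dependencies are solved together. The technique reduced to a sketch, with illustrative package names and version pin:

- package:
    name: "atomic-openshift-3.7.23,atomic-openshift-master-3.7.23,atomic-openshift-node-3.7.23"
    state: present  # one transaction, so package 'a' cannot drag 'b' to a newer version
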
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c37a5f9ab..50be0dee0 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,6 +2,21 @@
###############################################################################
# Upgrade Masters
###############################################################################
+- name: Backup and upgrade etcd
+ import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
+
+# Create service signer cert when missing. Service signer certificate
+# is added to master config in the master_config_upgrade hook.
+- name: Determine if service signer cert must be created
+ hosts: oo_first_master
+ tasks:
+ - name: Determine if service signer certificate must be created
+ stat:
+ path: "{{ openshift.common.config_base }}/master/service-signer.crt"
+ register: service_signer_cert_stat
+ changed_when: false
+
+- import_playbook: create_service_signer_cert.yml
# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
@@ -10,7 +25,7 @@
tasks:
- name: Upgrade all storage
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=* --confirm
register: l_pb_upgrade_control_plane_pre_upgrade_storage
when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
@@ -19,35 +34,6 @@
- l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
- openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
-# If facts cache were for some reason deleted, this fact may not be set, and if not set
-# it will always default to true. This causes problems for the etcd data dir fact detection
-# so we must first make sure this is set correctly before attempting the backup.
-- name: Set master embedded_etcd fact
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-
-- name: Upgrade and backup etcd
- include: ./etcd/main.yml
-
-# Create service signer cert when missing. Service signer certificate
-# is added to master config in the master_config_upgrade hook.
-- name: Determine if service signer cert must be created
- hosts: oo_first_master
- tasks:
- - name: Determine if service signer certificate must be created
- stat:
- path: "{{ openshift.common.config_base }}/master/service-signer.crt"
- register: service_signer_cert_stat
- changed_when: false
-
-- include: create_service_signer_cert.yml
-
# Set openshift_master_facts separately. In order to reconcile
# admission_config's, we currently must run openshift_master_facts and
# then run openshift_facts.
@@ -63,94 +49,49 @@
vars:
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
- handlers:
- - include: ../../../../roles/openshift_master/handlers/main.yml
- static: yes
- roles:
- - openshift_facts
- - lib_utils
- post_tasks:
+ tasks:
+ - import_role:
+ name: openshift_facts
# Run the pre-upgrade hook if defined:
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: "{{ openshift_master_upgrade_pre_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: rpm_upgrade.yml component=master
- when: not openshift.common.is_containerized | bool
-
- - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
-
- - include: upgrade_scheduler.yml
-
- - include: "{{ master_config_hook }}"
- when: master_config_hook is defined
-
- - include_vars: ../../../../roles/openshift_master/vars/main.yml
-
- - name: Remove any legacy systemd units and update systemd units
- include: ../../../../roles/openshift_master/tasks/systemd_units.yml
-
- - name: Check for ca-bundle.crt
- stat:
- path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- register: ca_bundle_stat
- failed_when: false
-
- - name: Check for ca.crt
- stat:
- path: "{{ openshift.common.config_base }}/master/ca.crt"
- register: ca_crt_stat
- failed_when: false
-
- - name: Migrate ca.crt to ca-bundle.crt
- command: mv ca.crt ca-bundle.crt
- args:
- chdir: "{{ openshift.common.config_base }}/master"
- when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-
- - name: Link ca.crt to ca-bundle.crt
- file:
- src: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- path: "{{ openshift.common.config_base }}/master/ca.crt"
- state: link
- when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-
- - name: Update oreg value
- yedit:
- src: "{{ openshift.common.config_base }}/master/master-config.yaml"
- key: 'imageConfig.format'
- value: "{{ oreg_url | default(oreg_url_master) }}"
- when: oreg_url is defined or oreg_url_master is defined
+ - import_role:
+ name: openshift_master
+ tasks_from: upgrade.yml
# Run the upgrade hook prior to restarting services/system if defined:
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: "{{ openshift_master_upgrade_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: ../../openshift-master/restart_hosts.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
- - include: ../../openshift-master/restart_services.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
# Run the post-upgrade hook if defined:
- debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- - include: "{{ openshift_master_upgrade_post_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- name: Post master upgrade - Upgrade clusterpolicies storage
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=clusterpolicies --confirm
register: l_pb_upgrade_control_plane_post_upgrade_storage
- when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ - openshift_version is version_compare('3.7','<')
failed_when:
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
@@ -171,8 +112,8 @@
tasks:
- set_fact:
master_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+ | lib_utils_oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
- set_fact:
master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) | list }}"
- fail:
@@ -188,18 +129,14 @@
roles:
- { role: openshift_cli }
vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
- # restart.
- skip_docker_role: True
__master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version is version_compare('3.7','<')
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -207,14 +144,14 @@
- name: Reconcile Cluster Role Bindings
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-role-bindings
--exclude-groups=system:authenticated
--exclude-groups=system:authenticated:oauth
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version is version_compare('3.7','<')
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -223,15 +160,16 @@
- name: Reconcile Jenkins Pipeline Role Bindings
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
run_once: true
register: reconcile_jenkins_role_binding_result
changed_when:
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
- when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool)
+ when:
+ - openshift_version is version_compare('3.7','<')
- - when: (openshift.common.version_gte_3_6 | bool) and (not openshift.common.version_gte_3_7 | bool)
+ - when: openshift_upgrade_target is version_compare('3.7','<')
block:
- name: Retrieve shared-resource-viewer
oc_obj:
@@ -250,7 +188,6 @@
- "'annotations' in objout['results']['results'][0]['metadata']"
- "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']"
- "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'"
-
- copy:
src: "{{ item }}"
dest: "/tmp/{{ item }}"
@@ -268,10 +205,16 @@
- "/tmp/{{ __master_shared_resource_viewer_file }}"
delete_after: true
when: __shared_resource_viewer_protected is not defined
+ register: result
+ retries: 3
+ delay: 5
+ until: result.rc == 0
+ ignore_errors: true
+
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
+ {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
register: reconcile_scc_result
changed_when:
- reconcile_scc_result.stdout != ''
@@ -280,7 +223,7 @@
- name: Migrate storage post policy reconciliation
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=* --confirm
run_once: true
register: l_pb_upgrade_control_plane_post_upgrade_storage
@@ -303,8 +246,8 @@
tasks:
- set_fact:
reconcile_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+ | lib_utils_oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
- set_fact:
reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) | list }}"
- fail:
@@ -318,8 +261,8 @@
roles:
- openshift_facts
tasks:
- - include: docker/tasks/upgrade.yml
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+ - include_tasks: docker/tasks/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift_is_atomic | bool
- name: Drain and upgrade master nodes
hosts: oo_masters_to_config:&oo_nodes_to_upgrade
@@ -330,7 +273,7 @@
pre_tasks:
- name: Load lib_openshift modules
- include_role:
+ import_role:
name: lib_openshift
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
@@ -344,25 +287,23 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
- until: not l_upgrade_control_plane_drain_result | failed
+ until: not (l_upgrade_control_plane_drain_result is failed)
retries: 60
delay: 60
roles:
- - lib_openshift
- openshift_facts
- - docker
- - openshift_node_dnsmasq
- - openshift_node_upgrade
-
post_tasks:
+ - import_role:
+ name: openshift_node
+ tasks_from: upgrade.yml
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -371,5 +312,5 @@
retries: 10
delay: 5
register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
+ until: node_schedulable is succeeded
+ when: node_unschedulable is changed
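
The pre/mid/post hook includes in the rewritten control-plane play are driven entirely by inventory variables; a hedged sketch of enabling all three (the paths are illustrative):

openshift_master_upgrade_pre_hook: /usr/share/custom/pre_master.yml
openshift_master_upgrade_hook: /usr/share/custom/mid_master.yml
openshift_master_upgrade_post_hook: /usr/share/custom/post_master.yml
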
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index c93a5d89c..464af3ae6 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,16 +1,23 @@
---
+- name: Prepull images and RPMs before the rolling restart
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ roles:
+ - role: openshift_facts
+ tasks:
+ - import_role:
+ name: openshift_node
+ tasks_from: upgrade_pre.yml
+
- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
+ roles:
+ - lib_openshift
+ - openshift_facts
pre_tasks:
- - name: Load lib_openshift modules
- include_role:
- name: lib_openshift
-
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
@@ -22,28 +29,21 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
- until: not l_upgrade_nodes_drain_result | failed
+ until: not (l_upgrade_nodes_drain_result is failed)
retries: 60
delay: 60
- roles:
- - lib_openshift
- - openshift_facts
- - docker
- - openshift_node_dnsmasq
- - openshift_node_upgrade
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
post_tasks:
+ - import_role:
+ name: openshift_node
+ tasks_from: upgrade.yml
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -52,5 +52,13 @@
retries: 10
delay: 5
register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
+ until: node_schedulable is succeeded
+ when: node_unschedulable is changed
+
+- name: Re-enable excluders
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ tasks:
+ - import_role:
+ name: openshift_excluder
+ vars:
+ r_openshift_excluder_action: enable
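
As the comment in the play notes, the serial and max-fail knobs are evaluated early and must arrive via -e rather than per-host inventory. An illustrative invocation (the playbook path is a placeholder):

ansible-playbook -i hosts <upgrade_nodes_playbook>.yml \
  -e openshift_upgrade_nodes_serial="20%" \
  -e openshift_upgrade_nodes_max_fail_percentage=10
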
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
new file mode 100644
index 000000000..6d59bfd0b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -0,0 +1,66 @@
+---
+- name: Create new scale group
+ hosts: localhost
+ tasks:
+ - name: Build upgrade scale groups
+ import_role:
+ name: openshift_aws
+ tasks_from: upgrade_node_group.yml
+
+ - fail:
+    msg: "Ensure that new scale groups were provisioned before proceeding with the update."
+ when:
+ - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+ - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
+ - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
+
+- name: Initialize upgrade bits
+ import_playbook: init.yml
+
+- name: Unschedule nodes
+ hosts: oo_sg_current_nodes
+ tasks:
+ - name: Load lib_openshift modules
+ import_role:
+ name: ../roles/lib_openshift
+
+ - name: Mark node unschedulable
+ oc_adm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable is succeeded
+
+- name: Drain nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+ tasks:
+ - name: Drain Node for Kubelet upgrade
+ command: >
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not (l_upgrade_nodes_drain_result is failed)
+    retries: "{{ (1 if openshift_upgrade_nodes_drain_timeout | default('0') == '0' else 0) | int }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default('0') == '0'
+
+# Alright, let's clean up!
+- name: Clean up the old scale group
+ hosts: localhost
+ tasks:
+ - name: Clean up scale group
+ import_role:
+ name: openshift_aws
+ tasks_from: remove_scale_group.yml
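
In the drain step of this new playbook, a failed drain is retried and treated as fatal only when no timeout was requested; once openshift_upgrade_nodes_drain_timeout is set, oc adm drain is bounded by --timeout and a node that cannot be drained in time is tolerated so the rolling replacement can proceed. Illustrative invocation (value hypothetical):

ansible-playbook -i hosts upgrade_scale_group.yml -e openshift_upgrade_nodes_drain_timeout=600
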
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
deleted file mode 100644
index 8558bf3e9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ /dev/null
@@ -1,173 +0,0 @@
----
-# Upgrade predicates
-- vars:
- prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
- prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
- default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
- # older_predicates are the set of predicates that have previously been
- # hard-coded into openshift_facts
- older_predicates:
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: MaxEBSVolumeCount
- - name: MaxGCEPDVolumeCount
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- # older_predicates_no_region are the set of predicates that have previously
- # been hard-coded into openshift_facts, with the Region predicate removed
- older_predicates_no_region:
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: MaxEBSVolumeCount
- - name: MaxGCEPDVolumeCount
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- block:
-
- # Handle case where openshift_master_predicates is defined
- - block:
- - debug:
- msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
- when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
-
- - debug:
- msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
- when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
- when: openshift_master_scheduler_predicates | default(none) is not none
-
- # Handle cases where openshift_master_predicates is not defined
- - block:
- - debug:
- msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
- when:
- - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
- - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]
-
- - set_fact:
- openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
- when:
- - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
- - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]
-
- - set_fact:
- openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
- when:
- - openshift_master_scheduler_current_predicates != default_predicates_no_region
- - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
-
- when: openshift_master_scheduler_predicates | default(none) is none
-
-
-# Upgrade priorities
-- vars:
- prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
- prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}"
- default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}"
- # older_priorities are the set of priorities that have previously been
- # hard-coded into openshift_facts
- older_priorities:
- - - name: LeastRequestedPriority
- weight: 1
- - name: SelectorSpreadPriority
- weight: 1
- - name: Zone
- weight: 2
- argument:
- serviceAntiAffinity:
- label: zone
- # older_priorities_no_region are the set of priorities that have previously
- # been hard-coded into openshift_facts, with the Zone priority removed
- older_priorities_no_zone:
- - - name: LeastRequestedPriority
- weight: 1
- - name: SelectorSpreadPriority
- weight: 1
- block:
-
- # Handle case where openshift_master_priorities is defined
- - block:
- - debug:
- msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
- when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
-
- - debug:
- msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
- when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
- when: openshift_master_scheduler_priorities | default(none) is not none
-
- # Handle cases where openshift_master_priorities is not defined
- - block:
- - debug:
- msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
- when:
- - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
- - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]
-
- - set_fact:
- openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
- when:
- - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
- - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]
-
- - set_fact:
- openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
- when:
- - openshift_master_scheduler_current_priorities != default_priorities_no_zone
- - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]
-
- when: openshift_master_scheduler_priorities | default(none) is none
-
-
-# Update scheduler
-- vars:
- scheduler_config:
- kind: Policy
- apiVersion: v1
- predicates: "{{ openshift_upgrade_scheduler_predicates
- | default(openshift_master_scheduler_current_predicates) }}"
- priorities: "{{ openshift_upgrade_scheduler_priorities
- | default(openshift_master_scheduler_current_priorities) }}"
- block:
- - name: Update scheduler config
- copy:
- content: "{{ scheduler_config | to_nice_json }}"
- dest: "{{ openshift_master_scheduler_conf }}"
- backup: true
- when: >
- openshift_upgrade_scheduler_predicates is defined or
- openshift_upgrade_scheduler_priorities is defined
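
For context on what just got deleted: upgrade_scheduler.yml ended by serializing scheduler_config with to_nice_json into openshift_master_scheduler_conf, producing a Policy file roughly like this (predicate and priority lists abbreviated; entries taken from the defaults above):

{
  "apiVersion": "v1",
  "kind": "Policy",
  "predicates": [{"name": "MatchNodeSelector"}, {"name": "PodFitsResources"}],
  "priorities": [{"name": "LeastRequestedPriority", "weight": 1}]
}
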
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
deleted file mode 100644
index 5e7a66171..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst'
- yaml_value: 400
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.qps'
- yaml_value: 200
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst'
- yaml_value: 600
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps'
- yaml_value: 300
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
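
Each modify_yaml call above maps a dotted key path onto master-config.yaml; the two serviceServingCert edits, for example, leave this fragment behind:

controllerConfig:
  serviceServingCert:
    signer:
      certFile: service-signer.crt
      keyFile: service-signer.key
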
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
deleted file mode 100644
index 89b524f14..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.burst'
- yaml_value: 40
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.qps'
- yaml_value: 20
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
deleted file mode 100644
index a241ef039..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
deleted file mode 100644
index f64f0e003..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ /dev/null
@@ -1,119 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
deleted file mode 100644
index cee4e9087..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ /dev/null
@@ -1,113 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
deleted file mode 100644
index 52458e03c..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
deleted file mode 100644
index ae217ba2e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ /dev/null
@@ -1,116 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../upgrade_nodes.yml
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
deleted file mode 100644
index 43da5b629..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ /dev/null
@@ -1,119 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters, and Docker only on standalone etcd hosts
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
deleted file mode 100644
index 8531e6045..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ /dev/null
@@ -1,111 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
deleted file mode 100644
index 52458e03c..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
deleted file mode 100644
index 30e719d8f..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
-
-- include: ../upgrade_nodes.yml
-
-- include: ../post_control_plane.yml
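openshift_upgrade_min above is the floor the pre-checks enforce. A hedged sketch of the kind of guard ../pre/verify_upgrade_targets.yml applies (illustrative only, not the actual file):

    # Illustrative guard; the real verify_upgrade_targets.yml checks more.
    - fail:
        msg: >
          Current version {{ openshift.common.version }} is below the
          minimum {{ openshift_upgrade_min }} required for this upgrade
      when: openshift.common.version is version_compare(openshift_upgrade_min, '<')
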
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
deleted file mode 100644
index e9cec9220..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ /dev/null
@@ -1,123 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters, and Docker only on standalone etcd hosts
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_5/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
deleted file mode 100644
index e29d0f8e6..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ /dev/null
@@ -1,111 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
deleted file mode 100644
index ae63c9ca9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-###############################################################################
-# Pre-upgrade checks for known data problems. If this playbook fails, you should
-# contact support. If you're not supported, contact users@lists.openshift.com
-#
-# oc_objectvalidator provides these two checks:
-# 1 - SDN data issues, never seen in the wild but known to be possible per code audits
-# https://github.com/openshift/origin/issues/12697
-# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934
-#
-###############################################################################
-- name: Verify 3.5 specific upgrade checks
- hosts: oo_first_master
- roles:
- - { role: lib_openshift }
- tasks:
- - name: Check for invalid namespaces and SDN errors
- oc_objectvalidator:
-
- # What's all this PetSet business about?
- #
- # 'PetSets' were ALPHA resources in Kubernetes 1.4 (OCP <= 3.4). In
- # Kubernetes 1.5 (OCP >= 3.5) they are no longer supported. The BETA
- # resource 'StatefulSets' replaces them. We can't migrate clients'
- # PetSets to StatefulSets. Additionally, Red Hat has never officially
- # supported these resource types. Sorry, users, but if you were using
- # unsupported resources from the Kube documentation, then we can't
- # help you at this time.
- #
- # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229
- - name: Check if legacy PetSets exist
- oc_obj:
- state: list
- all_namespaces: true
- kind: petsets
- register: l_do_petsets_exist
-
- - name: Fail on unsupported resource migration 'PetSets'
- fail:
- msg: >
- PetSet objects were detected in your cluster. These are an
- Alpha feature in upstream Kubernetes 1.4 and are not supported
- by Red Hat. In Kubernetes 1.5, they are replaced by the Beta
- feature StatefulSets. Red Hat currently does not offer support
- for either PetSets or StatefulSets.
-
- Automatically migrating PetSets to StatefulSets in OpenShift
- Container Platform (OCP) 3.5 is not supported. See the
- Kubernetes "Upgrading from PetSets to StatefulSets"
- documentation for additional information:
-
- https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/
-
- PetSets MUST be removed before upgrading to OCP 3.5. Red Hat
- strongly recommends reading the above referenced documentation
- in its entirety before taking any destructive actions.
-
- If you want to simply remove all PetSets without manually
- migrating to StatefulSets, run this command as a user with
- cluster-admin privileges:
-
- $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false
- when:
- # Search did not fail, valid resource type found
- - l_do_petsets_exist.results.returncode == 0
- # Items do exist in the search results
- - l_do_petsets_exist.results.results.0['items'] | length > 0
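The two when-conditions above depend on the shape of the registered oc_obj result: results.returncode reports whether the list call succeeded, and results.results.0['items'] holds the parsed object list. A hedged debug task that surfaces that layout (illustrative):

    # Illustrative: expose the registered structure the guards above rely on.
    - debug:
        msg: "rc={{ l_do_petsets_exist.results.returncode }}, petsets={{ l_do_petsets_exist.results.results.0['items'] | length }}"
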
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 920dc2ffc..d520c6aee 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -13,110 +13,31 @@
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
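The per-version pre-upgrade plays deleted above collapse into a shared, parameterized ../pre/config.yml. A minimal sketch of how such a playbook can consume the l_upgrade_* host-group variables (illustrative; the real file may differ):

    ---
    # Illustrative shape only: each pre-upgrade concern targets the host
    # pattern handed in by the version-specific wrapper playbook.
    - name: Update repos on upgrade hosts
      hosts: "{{ l_upgrade_repo_hosts }}"
      roles:
      - openshift_repos

    - name: Verify upgrade targets
      hosts: "{{ l_upgrade_verify_targets_hosts }}"
      tasks:
      - include: verify_upgrade_targets.yml
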
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 27d8515dc..a956fdde5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,117 +11,38 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
tasks:
- set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
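Throughout these hunks '- include:' becomes '- import_playbook:'. Playbook-level include was deprecated in Ansible 2.4; import_playbook is its static replacement, resolved at parse time, and vars set on it (as above) apply to every play in the imported file. The bare pattern, with a hypothetical file name:

    # Static import; 'vars' here are visible to all plays inside the
    # imported playbook (file name is hypothetical).
    - import_playbook: hypothetical_wrapper.yml
      vars:
        l_example_toggle: True
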
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index ba6fcc3f8..4febe76ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -15,101 +15,24 @@
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index bf3b94682..4daa9e490 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -15,128 +15,45 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-#TODO: Why doesn't this compose using ./upgrade_control_plane rather than
-# ../upgrade_control_plane?
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
- hosts: oo_etcd_to_config
+ hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
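The separate stop and start tasks above are deliberate: under Ansible's default linear strategy, the stop task finishes on every master before any host begins the start task, so all controllers are down together and a leader is re-elected under the new mode. A per-host restart would let old and new controllers overlap; that anti-pattern, for contrast (illustrative):

    # Anti-pattern here (illustrative): restarts controllers one host at
    # a time, defeating the simultaneous-stop requirement.
    - name: Restart master controllers
      systemd:
        name: "{{ openshift_service_type }}-master-controllers"
        state: restarted
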
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index b91bea617..1750148d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,135 +11,54 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
- tags:
- - pre_upgrade
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: validator.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
- hosts: oo_etcd_to_config
+ hosts: oo_masters_to_config
gather_facts: no
+ roles:
+ - role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index bc080f9a3..16d95514c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,99 +17,22 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
+# Pre-upgrade completed
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
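The excluder pattern 'oo_nodes_to_config:!oo_masters_to_config' above uses Ansible's host-pattern algebra: ':' unions groups and '!' subtracts one, so only dedicated nodes are matched. A hedged illustration:

    # Illustrative: matches every host in oo_nodes_to_config that is NOT
    # also in oo_masters_to_config.
    - hosts: "oo_nodes_to_config:!oo_masters_to_config"
      gather_facts: no
      tasks:
      - debug:
          msg: "dedicated node: {{ inventory_hostname }}"
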
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index f76fc68d1..49e691352 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -11,13 +11,15 @@
tasks:
- name: Check for invalid namespaces and SDN errors
oc_objectvalidator:
-
+ # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO
- name: Confirm OpenShift authorization objects are in sync
command: >
- {{ openshift.common.client_binary }} adm migrate authorization
- when: not openshift.common.version_gte_3_7 | bool
+ {{ openshift_client_binary }} adm migrate authorization
+ when:
+ - openshift_currently_installed_version is version_compare('3.7','<')
+ - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool
changed_when: false
register: l_oc_result
until: l_oc_result.rc == 0
- retries: 4
+ retries: 2
delay: 15
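version_compare above does a semantic comparison rather than a string one, so micro releases still sort below the next minor. A tiny hedged illustration:

    # Illustrative: both expressions evaluate to True.
    - debug:
        msg:
        - "{{ '3.6.173' is version_compare('3.7', '<') }}"
        - "{{ '3.7.0' is version_compare('3.7', '>=') }}"
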
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml
index 1d4d1919c..1d4d1919c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/roles b/playbooks/common/openshift-cluster/upgrades/v3_8/roles
index 415645be6..415645be6 120000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/roles
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/roles
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
new file mode 100644
index 000000000..0f74e0137
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -0,0 +1,59 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+
+- import_playbook: validator.yml
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Stop {{ openshift_service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift_service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift_service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift_service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../upgrade_nodes.yml
+
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
new file mode 100644
index 000000000..08bfd239f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -0,0 +1,64 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
+
+- import_playbook: validator.yml
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Stop {{ openshift_service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift_service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift_service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift_service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
new file mode 100644
index 000000000..b5f1038fd
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -0,0 +1,38 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml
new file mode 100644
index 000000000..d8540abfb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.8 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - debug: msg="noop"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
index db0c8f886..1d4d1919c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -1,6 +1,11 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.election.lockName'
+ yaml_value: 'openshift-master-controllers'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
yaml_value: service-signer.crt
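
After both modify_yaml tasks above run, the relevant fragment of master-config.yaml should look roughly like the following (a sketch, assuming the keys are created or overwritten in place):

controllerConfig:
  election:
    lockName: openshift-master-controllers
  serviceServingCert:
    signer:
      certFile: service-signer.crt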
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/roles b/playbooks/common/openshift-cluster/upgrades/v3_9/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/roles
@@ -0,0 +1 @@
+../../../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
new file mode 100644
index 000000000..0aea5069d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -0,0 +1,55 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../init.yml
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+
+- import_playbook: validator.yml
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../upgrade_nodes.yml
+
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
new file mode 100644
index 000000000..05aa737c6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -0,0 +1,65 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../init.yml
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
+
+- import_playbook: validator.yml
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
new file mode 100644
index 000000000..1d1b255c1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -0,0 +1,34 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../init.yml
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+- import_playbook: ../pre/config.yml
+ vars:
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
new file mode 100644
index 000000000..4bd2d87b1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.9 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - debug: msg="noop"
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
deleted file mode 100644
index be2e6a15a..000000000
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Validate node hostnames
- hosts: oo_nodes_to_config
- tasks:
- - name: Query DNS for IP address of {{ openshift.common.hostname }}
- shell:
- getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
- register: lookupip
- changed_when: false
- failed_when: false
- - name: Warn user about bad openshift_hostname values
- pause:
- prompt:
-        The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
-        doesn't resolve to an IP address owned by this host. Please set the
-        openshift_hostname variable to a hostname that, when resolved on this
-        host, maps to an IP address on one of its interfaces. Otherwise this
-        host will fail liveness checks for pods utilizing hostPorts. Press
-        ENTER to continue or CTRL-C to abort.
- seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
- when:
- - lookupip.stdout != '127.0.0.1'
- - lookupip.stdout not in ansible_all_ipv4_addresses
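
The deleted play above shells out to getent and awk to grab the first IPv4 address for the configured hostname. A hedged sketch of the same check using Ansible's built-in getent module instead (a hypothetical alternative, not code from this repository):

- name: Resolve {{ openshift.common.hostname }} via the getent module
  getent:
    database: ahostsv4
    key: "{{ openshift.common.hostname }}"
    fail_key: false
- name: Warn when no resolved address is owned by this host
  debug:
    msg: "{{ openshift.common.hostname }} does not resolve to a local address"
  # getent_ahostsv4 is keyed by resolved address; compare against local IPs
  when: ansible_all_ipv4_addresses | intersect((getent_ahostsv4 | default({})).keys() | list) | length == 0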
diff --git a/playbooks/common/openshift-etcd/ca.yml b/playbooks/common/openshift-etcd/ca.yml
deleted file mode 100644
index ac5543be9..000000000
--- a/playbooks/common/openshift-etcd/ca.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Generate new etcd CA
- hosts: oo_first_etcd
- roles:
- - role: openshift_etcd_facts
- tasks:
- - include_role:
- name: etcd
- tasks_from: ca
- vars:
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- when:
- - etcd_ca_setup | default(True) | bool
diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml
deleted file mode 100644
index eb6b94f33..000000000
--- a/playbooks/common/openshift-etcd/certificates.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: server_certificates.yml
-
-- include: master_etcd_certificates.yml
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
deleted file mode 100644
index 48d46bbb0..000000000
--- a/playbooks/common/openshift-etcd/config.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: etcd Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set etcd install 'In Progress'
- set_stats:
- data:
- installer_phase_etcd: "In Progress"
- aggregate: false
-
-- include: ca.yml
-
-- include: certificates.yml
-
-- name: Configure etcd
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- roles:
- - role: os_firewall
- - role: openshift_etcd
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- - role: nickhammond.logrotate
-
-- name: etcd Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set etcd install 'Complete'
- set_stats:
- data:
- installer_phase_etcd: "Complete"
- aggregate: false
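
The start/end plays above follow the installer's checkpoint convention: each component brackets its work with set_stats so the installer status callback can report per-phase progress. The same pattern for a hypothetical phase (the phase name is illustrative):

---
- hosts: oo_all_hosts
  gather_facts: false
  tasks:
    - name: Set example phase 'In Progress'
      set_stats:
        data:
          installer_phase_example: "In Progress"
        aggregate: false

# ... the component's plays run here ...

- hosts: oo_all_hosts
  gather_facts: false
  tasks:
    - name: Set example phase 'Complete'
      set_stats:
        data:
          installer_phase_example: "Complete"
        aggregate: false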
diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml
deleted file mode 100644
index 9264f3c32..000000000
--- a/playbooks/common/openshift-etcd/embedded2external.yml
+++ /dev/null
@@ -1,172 +0,0 @@
----
-- name: Pre-migrate checks
- hosts: localhost
- tasks:
- # Check there is only one etcd host
- - assert:
- that: groups.oo_etcd_to_config | default([]) | length == 1
- msg: "[etcd] group must contain only one host"
- # Check there is only one master
- - assert:
- that: groups.oo_masters_to_config | default([]) | length == 1
- msg: "[master] group must contain only one host"
-
-# 1. stop a master
-- name: Prepare masters for etcd data migration
- hosts: oo_first_master
- roles:
- - role: openshift_facts
- tasks:
- - name: Check the master API is ready
- include_role:
- name: openshift_master
- tasks_from: check_master_api_is_ready
- - set_fact:
- master_service: "{{ openshift.common.service_type + '-master' }}"
- embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- - debug:
- msg: "master service name: {{ master_service }}"
- - name: Stop master
- service:
- name: "{{ master_service }}"
- state: stopped
- # 2. backup embedded etcd
- # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
- - include_role:
- name: etcd
- tasks_from: backup
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_backup_tag: pre-migrate
- r_etcd_common_embedded_etcd: "{{ true }}"
- r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
-
- - include_role:
- name: etcd
- tasks_from: backup.archive
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_backup_tag: pre-migrate
- r_etcd_common_embedded_etcd: "{{ true }}"
- r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
-
-# 3. deploy certificates (for etcd and master)
-- include: ca.yml
-
-- include: server_certificates.yml
-
-- name: Backup etcd client certificates for master host
- hosts: oo_first_master
- tasks:
- - include_role:
- name: etcd
- tasks_from: backup_master_etcd_certificates
-
-- name: Redeploy master etcd certificates
- include: master_etcd_certificates.yml
- vars:
- etcd_certificates_redeploy: "{{ true }}"
-
-# 4. deploy external etcd
-- include: ../openshift-etcd/config.yml
-
-# 5. stop external etcd
-- name: Cleanse etcd
- hosts: oo_etcd_to_config[0]
- gather_facts: no
- pre_tasks:
- - include_role:
- name: etcd
- tasks_from: disable_etcd
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- - include_role:
- name: etcd
- tasks_from: clean_data
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
-
-# 6. copy the embedded etcd backup to the external host
-# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory
-- name: Copy embedded etcd backup to the external host
- hosts: localhost
- tasks:
- - name: Create local temp directory for syncing etcd backup
- local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
- register: g_etcd_client_mktemp
- changed_when: False
- become: no
-
- - include_role:
- name: etcd
- tasks_from: backup.fetch
- vars:
- r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_first_master.0].openshift.common.etcd_runtime }}"
- etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
- r_etcd_common_backup_tag: pre-migrate
- r_etcd_common_embedded_etcd: "{{ true }}"
- r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
- delegate_to: "{{ groups.oo_first_master[0] }}"
-
- - include_role:
- name: etcd
- tasks_from: backup.copy
- vars:
- r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.etcd_runtime }}"
- etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
- r_etcd_common_backup_tag: pre-migrate
- r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
- delegate_to: "{{ groups.oo_etcd_to_config[0] }}"
-
- - debug:
- msg: "etcd_backup_dest_directory: {{ g_etcd_client_mktemp.stdout }}"
-
- - name: Delete temporary directory
- local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
- changed_when: False
- become: no
-
-# 7. force new cluster from the backup
-- name: Force new etcd cluster
- hosts: oo_etcd_to_config[0]
- tasks:
- - include_role:
- name: etcd
- tasks_from: backup.unarchive
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_backup_tag: pre-migrate
- r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
-
- - include_role:
- name: etcd
- tasks_from: backup.force_new_cluster
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_backup_tag: pre-migrate
- r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
- etcd_peer: "{{ openshift.common.ip }}"
- etcd_url_scheme: "https"
- etcd_peer_url_scheme: "https"
-
-# 8. re-configure master to use the external etcd
-- name: Configure master to use external etcd
- hosts: oo_first_master
- tasks:
- - include_role:
- name: openshift_master
- tasks_from: configure_external_etcd
- vars:
- etcd_peer_url_scheme: "https"
- etcd_ip: "{{ openshift.common.ip }}"
- etcd_peer_port: 2379
-
- # 9. start the master
- - name: Start master
- service:
- name: "{{ master_service }}"
- state: started
- register: service_status
- until: service_status.state is defined and service_status.state == "started"
- retries: 5
- delay: 10
diff --git a/playbooks/common/openshift-etcd/filter_plugins b/playbooks/common/openshift-etcd/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-etcd/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-etcd/lookup_plugins b/playbooks/common/openshift-etcd/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-etcd/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-etcd/master_etcd_certificates.yml b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
deleted file mode 100644
index 0a25aac57..000000000
--- a/playbooks/common/openshift-etcd/master_etcd_certificates.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Create etcd client certificates for master hosts
- hosts: oo_masters_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_facts
- - role: openshift_etcd_client_certificates
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
deleted file mode 100644
index 31362f2f6..000000000
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ /dev/null
@@ -1,169 +0,0 @@
----
-- name: Check if the master has embedded etcd
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - fail:
-      msg: "Migration of an embedded etcd is not supported. Please migrate the embedded etcd to an external etcd first."
- when:
- - groups.oo_etcd_to_config | default([]) | length == 0
-
-- name: Run pre-checks
- hosts: oo_etcd_to_migrate
- tasks:
- - include_role:
- name: etcd
- tasks_from: migrate.pre_check
- vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ ansible_default_ipv4.address }}"
-
-# TODO: This will be different for release-3.6 branch
-- name: Prepare masters for etcd data migration
- hosts: oo_masters_to_config
- tasks:
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type + '-master-controllers' }}"
- - "{{ openshift.common.service_type + '-master-api' }}"
- - debug:
- msg: "master service name: {{ master_services }}"
- - name: Stop masters
- service:
- name: "{{ item }}"
- state: stopped
- with_items: "{{ master_services }}"
-
-- name: Backup v2 data
- hosts: oo_etcd_to_migrate
- gather_facts: no
- roles:
- - role: openshift_facts
- post_tasks:
- - include_role:
- name: etcd
- tasks_from: backup
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_backup_tag: pre-migration
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
-
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_to_migrate)
- | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) | list }}"
- - fail:
- msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when:
- - etcd_backup_failed | length > 0
-
-- name: Stop etcd
- hosts: oo_etcd_to_migrate
- gather_facts: no
- pre_tasks:
- - include_role:
- name: etcd
- tasks_from: disable_etcd
- vars:
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
-
-- name: Migrate data on first etcd
- hosts: oo_etcd_to_migrate[0]
- gather_facts: no
- tasks:
- - include_role:
- name: etcd
- tasks_from: migrate
- vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ openshift.common.ip }}"
- etcd_url_scheme: "https"
- etcd_peer_url_scheme: "https"
-
-- name: Clean data stores on remaining etcd hosts
- hosts: oo_etcd_to_migrate[1:]
- gather_facts: no
- tasks:
- - include_role:
- name: etcd
- tasks_from: clean_data
- vars:
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ openshift.common.ip }}"
- etcd_url_scheme: "https"
- etcd_peer_url_scheme: "https"
- - name: Add etcd hosts
- delegate_to: localhost
- add_host:
- name: "{{ item }}"
- groups: oo_new_etcd_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_etcd_to_migrate[1:] | default([]) }}"
- changed_when: no
- - name: Set success
- set_fact:
- r_etcd_migrate_success: true
-
-- include: ./scaleup.yml
-
-- name: Gate on etcd migration
- hosts: oo_masters_to_config
- gather_facts: no
- tasks:
- - set_fact:
- etcd_migration_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_to_migrate)
- | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
- - set_fact:
- etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) | list }}"
-
-- name: Add TTLs on the first master
- hosts: oo_first_master[0]
- tasks:
- - include_role:
- name: etcd
- tasks_from: migrate.add_ttls
- vars:
- etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"
- etcd_url_scheme: "https"
- etcd_peer_url_scheme: "https"
- when: etcd_migration_failed | length == 0
-
-- name: Configure masters if etcd data migration is successful
- hosts: oo_masters_to_config
- tasks:
- - include_role:
- name: etcd
- tasks_from: migrate.configure_master
- when: etcd_migration_failed | length == 0
- - debug:
- msg: "Skipping master re-configuration since migration failed."
- when:
- - etcd_migration_failed | length > 0
- - name: Start master services
- service:
- name: "{{ item }}"
- state: started
- register: service_status
-    # Sometimes the master-api or master-controllers service fails to start on the first attempt
- until: service_status.state is defined and service_status.state == "started"
- retries: 5
- delay: 10
- with_items: "{{ master_services[::-1] }}"
- - fail:
- msg: "Migration failed. The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}"
- when:
- - etcd_migration_failed | length > 0
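
The backup and migration gates in the deleted file rely on the repo-local oo_select_keys and oo_collect filter plugins to collect the hosts that set a success fact. A rough equivalent using only core Jinja2/Ansible filters, shown as a sketch for clarity (not code from this repository):

- set_fact:
    etcd_backup_completed: "{{ groups.oo_etcd_to_migrate
      | map('extract', hostvars)
      | selectattr('r_etcd_common_backup_complete', 'defined')
      | selectattr('r_etcd_common_backup_complete')
      | map(attribute='inventory_hostname')
      | list }}"
- fail:
    msg: "etcd backup did not complete on: {{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) | join(',') }}"
  when: groups.oo_etcd_to_migrate | difference(etcd_backup_completed) | length > 0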
diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml
deleted file mode 100644
index 5eaea5ae8..000000000
--- a/playbooks/common/openshift-etcd/restart.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Restart etcd
- hosts: oo_etcd_to_config
- serial: 1
- tasks:
- - name: restart etcd
- service:
- name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
- state: restarted
- when:
- - not g_etcd_certificates_expired | default(false) | bool
-
-- name: Restart etcd
- hosts: oo_etcd_to_config
- tasks:
- - name: stop etcd
- service:
- name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
- state: stopped
- when:
- - g_etcd_certificates_expired | default(false) | bool
- - name: start etcd
- service:
- name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
- state: started
- when:
- - g_etcd_certificates_expired | default(false) | bool
diff --git a/playbooks/common/openshift-etcd/roles b/playbooks/common/openshift-etcd/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/common/openshift-etcd/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
deleted file mode 100644
index 20061366c..000000000
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_new_etcd_to_config
- roles:
- - openshift_etcd_facts
- post_tasks:
- - set_fact:
- etcd_hostname: "{{ etcd_hostname }}"
- etcd_ip: "{{ etcd_ip }}"
-
-- name: Configure etcd
- hosts: oo_new_etcd_to_config
- serial: 1
- any_errors_fatal: true
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- pre_tasks:
- - name: Add new etcd members to cluster
- command: >
- /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
- --key-file {{ etcd_peer_key_file }}
- --ca-file {{ etcd_peer_ca_file }}
- -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_ip }}:{{ etcd_client_port }}
- member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
- delegate_to: "{{ etcd_ca_host }}"
- failed_when:
- - etcd_add_check.rc == 1
- - ("peerURL exists" not in etcd_add_check.stderr)
- register: etcd_add_check
- retries: 3
- delay: 10
- until: etcd_add_check.rc == 0
- - include_role:
- name: etcd
- tasks_from: server_certificates
- vars:
- etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- roles:
- - role: os_firewall
- when: etcd_add_check.rc == 0
- - role: openshift_etcd
- when: etcd_add_check.rc == 0
- etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_initial_cluster_state: "existing"
- etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
- etcd_ca_setup: False
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- - role: nickhammond.logrotate
- when: etcd_add_check.rc == 0
- post_tasks:
- - name: Verify cluster is stable
- command: >
- /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
- --key-file {{ etcd_peer_key_file }}
- --ca-file {{ etcd_peer_ca_file }}
- -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
- cluster-health
- register: scaleup_health
- retries: 3
- delay: 30
- until: scaleup_health.rc == 0
-
-- name: Update master etcd client urls
- hosts: oo_masters_to_config
- serial: 1
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
- | oo_collect('openshift.common.hostname')
- | default(none, true) }}"
- openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
- roles:
- - role: openshift_master_facts
- post_tasks:
- - include_role:
- name: openshift_master
- tasks_from: update_etcd_client_urls
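
For context on the stdout_lines[3] indexing in the deleted scaleup play: etcdctl v2's member add prints a confirmation line, a blank line, and then three ETCD_* lines, so index 3 is the ETCD_INITIAL_CLUSTER line. A sketch of the extraction in isolation (the sample output below is illustrative):

# Example `etcdctl member add` stdout (illustrative):
#   Added member named etcd-2 with ID 5eba... to cluster
#
#   ETCD_NAME="etcd-2"
#   ETCD_INITIAL_CLUSTER="etcd-1=https://10.0.0.1:2380,etcd-2=https://10.0.0.2:2380"
#   ETCD_INITIAL_CLUSTER_STATE="existing"
- set_fact:
    etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3]
      | regex_replace('ETCD_INITIAL_CLUSTER=', '')
      | regex_replace('\"', '') }}"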
diff --git a/playbooks/common/openshift-etcd/server_certificates.yml b/playbooks/common/openshift-etcd/server_certificates.yml
deleted file mode 100644
index 10e06747b..000000000
--- a/playbooks/common/openshift-etcd/server_certificates.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Create etcd server certificates for etcd hosts
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_facts
- post_tasks:
- - include_role:
- name: etcd
- tasks_from: server_certificates
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
deleted file mode 100644
index 80cda9e21..000000000
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: GlusterFS Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set GlusterFS install 'In Progress'
- set_stats:
- data:
- installer_phase_glusterfs: "In Progress"
- aggregate: false
-
-- name: Open firewall ports for GlusterFS nodes
- hosts: glusterfs
- tasks:
- - include_role:
- name: openshift_storage_glusterfs
- tasks_from: firewall.yml
- when:
- - openshift_storage_glusterfs_is_native | default(True) | bool
-
-- name: Open firewall ports for GlusterFS registry nodes
- hosts: glusterfs_registry
- tasks:
- - include_role:
- name: openshift_storage_glusterfs
- tasks_from: firewall.yml
- when:
- - openshift_storage_glusterfs_registry_is_native | default(True) | bool
-
-- name: Configure GlusterFS
- hosts: oo_first_master
- tasks:
- - name: setup glusterfs
- include_role:
- name: openshift_storage_glusterfs
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- name: GlusterFS Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set GlusterFS install 'Complete'
- set_stats:
- data:
- installer_phase_glusterfs: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-glusterfs/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-glusterfs/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml
deleted file mode 100644
index 80cf7529e..000000000
--- a/playbooks/common/openshift-glusterfs/registry.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- include: config.yml
-
-- name: Initialize GlusterFS registry PV and PVC vars
- hosts: oo_first_master
- tags: hosted
- tasks:
- - set_fact:
- glusterfs_pv: []
- glusterfs_pvc: []
-
- - set_fact:
- glusterfs_pv:
- - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
- capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
- access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
- storage:
- glusterfs:
- endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
- path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
- readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
- glusterfs_pvc:
- - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
- capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
- access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
- when: openshift.hosted.registry.storage.glusterfs.swap
-
-- name: Create persistent volumes
- hosts: oo_first_master
- tags:
- - hosted
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
- roles:
- - role: openshift_persistent_volumes
- when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
-
-- name: Create Hosted Resources
- hosts: oo_first_master
- tags:
- - hosted
- pre_tasks:
- - set_fact:
- openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- roles:
- - role: openshift_hosted
diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/common/openshift-glusterfs/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
deleted file mode 100644
index 2a703cb61..000000000
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: Load Balancer Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set load balancer install 'In Progress'
- set_stats:
- data:
- installer_phase_loadbalancer: "In Progress"
- aggregate: false
-
-- name: Configure firewall and docker for load balancers
- hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
- vars:
- openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
- roles:
- - role: os_firewall
- - role: openshift_docker
- when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
-
-- name: Configure load balancers
- hosts: oo_lb_to_config
- vars:
- openshift_loadbalancer_frontends: "{{ (openshift_master_api_port | default(8443)
- | oo_openshift_loadbalancer_frontends(hostvars | oo_select_keys(groups['oo_masters']),
- openshift_use_nuage | default(false),
- nuage_mon_rest_server_port | default(none)))
- + openshift_loadbalancer_additional_frontends | default([]) }}"
- openshift_loadbalancer_backends: "{{ (openshift_master_api_port | default(8443)
- | oo_openshift_loadbalancer_backends(hostvars | oo_select_keys(groups['oo_masters']),
- openshift_use_nuage | default(false),
- nuage_mon_rest_server_port | default(none)))
- + openshift_loadbalancer_additional_backends | default([]) }}"
- openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
- roles:
- - role: openshift_loadbalancer
- - role: tuned
-
-- name: Load Balancer Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set load balancer install 'Complete'
- set_stats:
- data:
- installer_phase_loadbalancer: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-loadbalancer/filter_plugins b/playbooks/common/openshift-loadbalancer/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-loadbalancer/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/lookup_plugins b/playbooks/common/openshift-loadbalancer/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-loadbalancer/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/roles b/playbooks/common/openshift-loadbalancer/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/common/openshift-loadbalancer/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml
deleted file mode 100644
index 908679e81..000000000
--- a/playbooks/common/openshift-management/config.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: Management Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Management install 'In Progress'
- set_stats:
- data:
- installer_phase_management: "In Progress"
- aggregate: false
-
-- name: Setup CFME
- hosts: oo_first_master
- pre_tasks:
- - name: Create a temporary place to evaluate the PV templates
- command: mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: r_openshift_management_mktemp
- changed_when: false
-
- tasks:
- - name: Run the CFME Setup Role
- include_role:
- name: openshift_management
- vars:
- template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
-
-- name: Management Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Management install 'Complete'
- set_stats:
- data:
- installer_phase_management: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-management/filter_plugins b/playbooks/common/openshift-management/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-management/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-management/library b/playbooks/common/openshift-management/library
deleted file mode 120000
index ba40d2f56..000000000
--- a/playbooks/common/openshift-management/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library \ No newline at end of file
diff --git a/playbooks/common/openshift-management/roles b/playbooks/common/openshift-management/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/common/openshift-management/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-management/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml
deleted file mode 100644
index 698d93405..000000000
--- a/playbooks/common/openshift-management/uninstall.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Uninstall CFME
- hosts: masters
- tasks:
- - name: Run the CFME Uninstall Role Tasks
- include_role:
- name: openshift_management
- tasks_from: uninstall
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
deleted file mode 100644
index e1472ce38..000000000
--- a/playbooks/common/openshift-master/additional_config.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Master Additional Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Master Additional install 'In Progress'
- set_stats:
- data:
- installer_phase_master_additional: "In Progress"
- aggregate: false
-
-- name: Additional master configuration
- hosts: oo_first_master
- vars:
- cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
- etcd_urls: "{{ openshift.master.etcd_urls }}"
- openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
- omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
- roles:
- - role: openshift_master_cluster
- when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- - role: openshift_examples
- when: openshift_install_examples | default(true, true) | bool
- registry_url: "{{ openshift.master.registry_url }}"
- - role: openshift_hosted_templates
- registry_url: "{{ openshift.master.registry_url }}"
- - role: openshift_manageiq
- when: openshift_use_manageiq | default(true) | bool
- - role: cockpit
- when:
- - openshift.common.is_atomic
- - deployment_type == 'openshift-enterprise'
- - osm_use_cockpit is undefined or osm_use_cockpit | bool
- - openshift.common.deployment_subtype != 'registry'
- - role: flannel_register
- when: openshift_use_flannel | default(false) | bool
-
-- name: Master Additional Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Master Additional install 'Complete'
- set_stats:
- data:
- installer_phase_master_additional: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-master/certificates.yml b/playbooks/common/openshift-master/certificates.yml
deleted file mode 100644
index f6afbc36f..000000000
--- a/playbooks/common/openshift-master/certificates.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Create OpenShift certificates for master hosts
- hosts: oo_masters_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- roles:
- - role: openshift_master_facts
- - role: openshift_named_certificates
- - role: openshift_ca
- - role: openshift_master_certificates
- openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.hostname')
- | default(none, true) }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
deleted file mode 100644
index b359919ba..000000000
--- a/playbooks/common/openshift-master/config.yml
+++ /dev/null
@@ -1,242 +0,0 @@
----
-- name: Master Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Master install 'In Progress'
- set_stats:
- data:
- installer_phase_master: "In Progress"
- aggregate: false
-
-- include: certificates.yml
-
-- name: Disable excluders
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
-- name: Gather and set facts for master hosts
- hosts: oo_masters_to_config
- pre_tasks:
- # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336
- #
- # When scaling up a cluster upgraded from OCP <= 3.5, ensure that
- # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing
- # masters, or absent if such is the case.
- - name: Detect if this host is a new master in a scale up
- set_fact:
- g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}"
-
- - name: Scaleup Detection
- debug:
- var: g_openshift_master_is_scaleup
-
- - name: Check for RPM generated config marker file .config_managed
- stat:
- path: /etc/origin/.config_managed
- register: rpmgenerated_config
-
- - name: Remove RPM generated config files if present
- file:
- path: "/etc/origin/{{ item }}"
- state: absent
- when:
- - rpmgenerated_config.stat.exists == true
- - deployment_type == 'openshift-enterprise'
- with_items:
- - master
- - node
- - .config_managed
-
- - set_fact:
- openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
- openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config']
- | default([]))
- | oo_collect('openshift.common.hostname')
- | default(none, true) }}"
- roles:
- - openshift_facts
- post_tasks:
- - openshift_facts:
- role: master
- local_facts:
- api_port: "{{ openshift_master_api_port | default(None) }}"
- api_url: "{{ openshift_master_api_url | default(None) }}"
- api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
- controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
- public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
- cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
- cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- console_path: "{{ openshift_master_console_path | default(None) }}"
- console_port: "{{ openshift_master_console_port | default(None) }}"
- console_url: "{{ openshift_master_console_url | default(None) }}"
- console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
- public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
- master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-
-- name: Inspect state of first master config settings
- hosts: oo_first_master
- roles:
- - role: openshift_facts
- post_tasks:
- - openshift_facts:
- role: master
- local_facts:
- session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
- session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
- - name: Check for existing configuration
- stat:
- path: /etc/origin/master/master-config.yaml
- register: master_config_stat
-
- - name: Set clean install fact
- set_fact:
- l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
-
- - name: Determine if etcd3 storage is in use
- command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
- register: etcd3_grep
- failed_when: false
- changed_when: false
-
- - name: Set etcd3 fact
- set_fact:
- l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
-
- - name: Check if atomic-openshift-master sysconfig exists yet
- stat:
- path: /etc/sysconfig/atomic-openshift-master
- register: l_aom_exists
-
- - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present
- command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master
- register: l_default_registry_defined
- when: l_aom_exists.stat.exists | bool
-
- - name: Check if atomic-openshift-master-api sysconfig exists yet
- stat:
- path: /etc/sysconfig/atomic-openshift-master-api
- register: l_aom_api_exists
-
- - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present
- command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api
- register: l_default_registry_defined_api
- when: l_aom_api_exists.stat.exists | bool
-
- - name: Check if atomic-openshift-master-controllers sysconfig exists yet
- stat:
- path: /etc/sysconfig/atomic-openshift-master-controllers
- register: l_aom_controllers_exists
-
- - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present
- command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers
- register: l_default_registry_defined_controllers
- when: l_aom_controllers_exists.stat.exists | bool
-
- - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value
- set_fact:
- l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}"
- l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}"
- l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}"
-
-- name: Generate master session secrets
- hosts: oo_first_master
- vars:
- g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([])) | length > 0 and (openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
- g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}"
- g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}"
- roles:
- - role: openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- session_auth_secrets: "{{ g_session_auth_secrets }}"
- session_encryption_secrets: "{{ g_session_encryption_secrets }}"
- when: not g_session_secrets_present | bool
-
-- name: Configure masters
- hosts: oo_masters_to_config
- any_errors_fatal: true
- vars:
- openshift_master_ha: "{{ openshift.master.ha }}"
- openshift_master_count: "{{ openshift.master.master_count }}"
- openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
- openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.hostname')
- | default(none, true) }}"
- openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.ip') | default([]) | join(',')
- }}"
- roles:
- - role: os_firewall
- - role: openshift_master_facts
- - role: openshift_hosted_facts
- - role: openshift_clock
- - role: openshift_cloud_provider
- - role: openshift_builddefaults
- - role: openshift_buildoverrides
- - role: nickhammond.logrotate
- - role: contiv
- contiv_role: netmaster
- when: openshift_use_contiv | default(False) | bool
- - role: openshift_master
- openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
- r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
- openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}"
- openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
- openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
- openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
- - role: tuned
- - role: nuage_ca
- when: openshift_use_nuage | default(false) | bool
- - role: nuage_common
- when: openshift_use_nuage | default(false) | bool
- - role: nuage_master
- when: openshift_use_nuage | default(false) | bool
- - role: calico_master
- when: openshift_use_calico | default(false) | bool
- tasks:
- - include_role:
- name: kuryr
- tasks_from: master
- when: openshift_use_kuryr | default(false) | bool
-
- post_tasks:
- - name: Create group for deployment type
- group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
- changed_when: False
-
-- name: Configure API Aggregation on masters
- hosts: oo_masters
- serial: 1
- tasks:
- - include: tasks/wire_aggregator.yml
-
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
-- name: Master Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Master install 'Complete'
- set_stats:
- data:
- installer_phase_master: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
deleted file mode 100644
index d0a9f11dc..000000000
--- a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
+++ /dev/null
@@ -1,2 +0,0 @@
-// empty file so that the master-config can still point to a file that exists
-// this file will be replaced by the template service broker role if enabled
diff --git a/playbooks/common/openshift-master/filter_plugins b/playbooks/common/openshift-master/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-master/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-master/library b/playbooks/common/openshift-master/library
deleted file mode 120000
index d0b7393d3..000000000
--- a/playbooks/common/openshift-master/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library/ \ No newline at end of file
diff --git a/playbooks/common/openshift-master/lookup_plugins b/playbooks/common/openshift-master/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-master/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
deleted file mode 100644
index 4d73b8124..000000000
--- a/playbooks/common/openshift-master/restart.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- include: validate_restart.yml
-
-- name: Restart masters
- hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- serial: 1
- handlers:
- - include: ../../../roles/openshift_master/handlers/main.yml
- static: yes
- roles:
- - openshift_facts
- post_tasks:
- - include: restart_hosts.yml
- when: openshift_rolling_restart_mode | default('services') == 'system'
-
- - include: restart_services.yml
- when: openshift_rolling_restart_mode | default('services') == 'services'
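
Which branch runs is selected by openshift_rolling_restart_mode ('services' by default; pass -e openshift_rolling_restart_mode=system to reboot hosts instead). The role handlers are included with static: yes so that tasks inside the included restart files can notify them by name; a minimal sketch of that wiring, with hypothetical file and handler names:

    - hosts: webservers
      handlers:
      # static: yes resolves the include at parse time, making the
      # handler names inside visible to notify: statements.
      - include: handlers/service_handlers.yml   # hypothetical file
        static: yes
      tasks:
      - command: /bin/true
        notify: restart service   # defined in the included file
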
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
deleted file mode 100644
index a5dbe0590..000000000
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Restart master system
- # https://github.com/ansible/ansible/issues/10616
- shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
- async: 1
- poll: 0
- ignore_errors: true
- become: yes
-
-# WARNING: This process is riddled with weird behavior.
-
-# Workaround for https://github.com/ansible/ansible/issues/21269
-- set_fact:
- wait_for_host: "{{ ansible_host }}"
-
-# Ansible's blog documents this *without* the port, but without it the task
-# now appears to wait for the full timeout and then proceed without checking
-# anything, so the port is required here.
-#
-# However, neither ansible_ssh_port nor ansible_port is reliably defined;
-# they are likely set only when overridden. Assume a default of 22.
-- name: Wait for master to restart
- local_action:
- module: wait_for
- host="{{ wait_for_host }}"
- state=started
- delay=10
- timeout=600
- port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}"
- become: no
-
-# Now that ssh is back up we can wait for API on the remote system,
-# avoiding some potential connection issues from local system:
-- name: Wait for master API to come back online
- wait_for:
- host: "{{ openshift.common.hostname }}"
- state: started
- delay: 10
- port: "{{ openshift.master.api_port }}"
- timeout: 600
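
The shutdown-then-wait_for sequence above is the generic pattern for rebooting a host that Ansible is currently managing: fire the reboot asynchronously so the dropped SSH connection does not fail the task, then poll from the control host. A self-contained sketch under the same assumptions (timeouts are illustrative; recent Ansible releases also ship a reboot module that wraps this dance):

    ---
    - name: Reboot hosts and wait for them to return
      hosts: all
      become: yes
      tasks:
      # Async + poll 0 detaches the task before the connection drops
      # (see ansible/ansible#10616).
      - name: Restart system
        shell: sleep 2 && shutdown -r now "rolling restart"
        async: 1
        poll: 0
        ignore_errors: true

      # Poll SSH from the control host; without a port, wait_for only
      # sleeps for the timeout instead of actually checking.
      - name: Wait for host to come back
        wait_for:
          host: "{{ ansible_host }}"
          port: "{{ ansible_port | default(22) }}"
          state: started
          delay: 10
          timeout: 600
        delegate_to: localhost
        become: no
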
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
deleted file mode 100644
index 4f8b758fd..000000000
--- a/playbooks/common/openshift-master/restart_services.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Restart master API
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: restarted
- when: openshift_master_ha | bool
-- name: Wait for master API to come back online
- wait_for:
- host: "{{ openshift.common.hostname }}"
- state: started
- delay: 10
- port: "{{ openshift.master.api_port }}"
- timeout: 600
- when: openshift_master_ha | bool
-- name: Restart master controllers
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: restarted
- # Ignore errors since it is possible that type != simple for
- # pre-3.1.1 installations.
- ignore_errors: true
- when: openshift_master_ha | bool
diff --git a/playbooks/common/openshift-master/roles b/playbooks/common/openshift-master/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/common/openshift-master/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
deleted file mode 100644
index f4dc9df8a..000000000
--- a/playbooks/common/openshift-master/scaleup.yml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-- name: Update master count
- hosts: oo_masters:!oo_masters_to_config
- serial: 1
- roles:
- - openshift_facts
- post_tasks:
- - openshift_facts:
- role: master
- local_facts:
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
- master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- - name: Update master count
- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.masterCount'
- yaml_value: "{{ openshift.master.master_count }}"
- notify:
- - restart master api
- - restart master controllers
- handlers:
- - name: restart master api
- service: name={{ openshift.common.service_type }}-master-api state=restarted
- notify: verify api server
- - name: restart master controllers
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
- - name: verify api server
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
-
-- include: ../openshift-master/set_network_facts.yml
-
-- include: ../openshift-etcd/certificates.yml
-
-- include: ../openshift-master/config.yml
-
-- include: ../openshift-loadbalancer/config.yml
-
-- include: ../openshift-node/certificates.yml
-
-- include: ../openshift-node/config.yml
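
The verify api server handler above polls /healthz/ready with curl until it returns ok; curl is used because the uri module of that era required python-httplib2. Where that dependency is acceptable, a roughly equivalent check with uri could look like this (a sketch; certificate validation is elided rather than wired to the CA bundle):

    - name: verify api server
      uri:
        url: "{{ openshift.master.api_url }}/healthz/ready"
        validate_certs: no    # sketch only; prefer validating against the CA bundle
        return_content: yes
      register: api_ready
      until: api_ready.status == 200 and api_ready.content == 'ok'
      retries: 120
      delay: 1
      changed_when: false
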
diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml
deleted file mode 100644
index 9a6cf26fc..000000000
--- a/playbooks/common/openshift-master/set_network_facts.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: Read first master's config
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - stat:
- path: "{{ openshift.common.config_base }}/master/master-config.yaml"
- register: g_master_config_stat
- - slurp:
- src: "{{ openshift.common.config_base }}/master/master-config.yaml"
- register: g_master_config_slurp
-
-- name: Set network facts for masters
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_facts
- post_tasks:
- - block:
- - set_fact:
- osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}"
- when: osm_cluster_network_cidr is not defined
- - set_fact:
- osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}"
- when: osm_host_subnet_length is not defined
- - set_fact:
- openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}"
- when: openshift_portal_net is not defined
- - openshift_facts:
- role: common
- local_facts:
- portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
- when:
- - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool
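
The slurp / b64decode / from_yaml pipeline used above works for pulling any remote YAML file into facts, since slurp returns file contents base64-encoded. A condensed sketch with a hypothetical path:

    - name: Read a remote YAML file into a variable
      hosts: some_host
      gather_facts: no
      tasks:
      - name: Slurp the file (content comes back base64-encoded)
        slurp:
          src: /etc/example/config.yaml    # hypothetical path
        register: cfg_raw

      - name: Decode and parse it
        set_fact:
          cfg: "{{ cfg_raw.content | b64decode | from_yaml }}"

      - debug:
          msg: "clusterNetworkCIDR is {{ cfg.networkConfig.clusterNetworkCIDR }}"
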
diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
deleted file mode 100644
index 560eea785..000000000
--- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml
+++ /dev/null
@@ -1,215 +0,0 @@
----
-- name: Make temp cert dir
- command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
- register: certtemp
- changed_when: False
-
-- name: Check for First Master Aggregator Signer cert
- stat:
- path: /etc/origin/master/front-proxy-ca.crt
- register: first_proxy_ca_crt
- changed_when: false
- delegate_to: "{{ groups.oo_first_master.0 }}"
-
-- name: Check for First Master Aggregator Signer key
- stat:
- path: /etc/origin/master/front-proxy-ca.key
- register: first_proxy_ca_key
- changed_when: false
- delegate_to: "{{ groups.oo_first_master.0 }}"
-
-# TODO: this currently has a bug where hostnames are required
-- name: Creating First Master Aggregator signer certs
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert
- --cert=/etc/origin/master/front-proxy-ca.crt
- --key=/etc/origin/master/front-proxy-ca.key
- --serial=/etc/origin/master/ca.serial.txt
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when:
- - not first_proxy_ca_crt.stat.exists
- - not first_proxy_ca_key.stat.exists
-
-- name: Check for Aggregator Signer cert
- stat:
- path: /etc/origin/master/front-proxy-ca.crt
- register: proxy_ca_crt
- changed_when: false
-
-- name: Check for Aggregator Signer key
- stat:
- path: /etc/origin/master/front-proxy-ca.key
- register: proxy_ca_key
- changed_when: false
-
-- name: Copy Aggregator Signer certs from first master
- fetch:
- src: "/etc/origin/master/{{ item }}"
- dest: "{{ certtemp.stdout }}/{{ item }}"
- flat: yes
- with_items:
- - front-proxy-ca.crt
- - front-proxy-ca.key
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when:
- - not proxy_ca_key.stat.exists
- - not proxy_ca_crt.stat.exists
-
-- name: Copy Aggregator Signer certs to host
- copy:
- src: "{{ certtemp.stdout }}/{{ item }}"
- dest: "/etc/origin/master/{{ item }}"
- with_items:
- - front-proxy-ca.crt
- - front-proxy-ca.key
- when:
- - not proxy_ca_key.stat.exists
- - not proxy_ca_crt.stat.exists
-
-# oc_adm_ca_server_cert:
-# cert: /etc/origin/master/front-proxy-ca.crt
-# key: /etc/origin/master/front-proxy-ca.key
-
-- name: Check for first master api-client config
- stat:
- path: /etc/origin/master/aggregator-front-proxy.kubeconfig
- register: first_front_proxy_kubeconfig
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
-
-# create-api-client-config generates a ca.crt file which will
-# overwrite the OpenShift CA certificate. Generate the aggregator
-# kubeconfig in a temporary directory and then copy files into the
-# master config dir to avoid overwriting ca.crt.
-- block:
- - name: Create first master api-client config for Aggregator
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config
- --certificate-authority=/etc/origin/master/front-proxy-ca.crt
- --signer-cert=/etc/origin/master/front-proxy-ca.crt
- --signer-key=/etc/origin/master/front-proxy-ca.key
- --user aggregator-front-proxy
- --client-dir={{ certtemp.stdout }}
- --signer-serial=/etc/origin/master/ca.serial.txt
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- - name: Copy first master api-client config for Aggregator
- copy:
- src: "{{ certtemp.stdout }}/{{ item }}"
- dest: "/etc/origin/master/"
- remote_src: true
- with_items:
- - aggregator-front-proxy.crt
- - aggregator-front-proxy.key
- - aggregator-front-proxy.kubeconfig
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- when:
- - not first_front_proxy_kubeconfig.stat.exists
-
-- name: Check for api-client config
- stat:
- path: /etc/origin/master/aggregator-front-proxy.kubeconfig
- register: front_proxy_kubeconfig
-
-- name: Copy api-client config from first master
- fetch:
- src: "/etc/origin/master/{{ item }}"
- dest: "{{ certtemp.stdout }}/{{ item }}"
- flat: yes
- delegate_to: "{{ groups.oo_first_master.0 }}"
- with_items:
- - aggregator-front-proxy.crt
- - aggregator-front-proxy.key
- - aggregator-front-proxy.kubeconfig
- when:
- - not front_proxy_kubeconfig.stat.exists
-
-- name: Copy api-client config to host
- copy:
- src: "{{ certtemp.stdout }}/{{ item }}"
- dest: "/etc/origin/master/{{ item }}"
- with_items:
- - aggregator-front-proxy.crt
- - aggregator-front-proxy.key
- - aggregator-front-proxy.kubeconfig
- when:
- - not front_proxy_kubeconfig.stat.exists
-
-- name: copy tech preview extension file for service console UI
- copy:
- src: openshift-ansible-catalog-console.js
- dest: /etc/origin/master/openshift-ansible-catalog-console.js
-
-- name: Update master config
- yedit:
- state: present
- src: /etc/origin/master/master-config.yaml
- edits:
- - key: aggregatorConfig.proxyClientInfo.certFile
- value: aggregator-front-proxy.crt
- - key: aggregatorConfig.proxyClientInfo.keyFile
- value: aggregator-front-proxy.key
- - key: authConfig.requestHeader.clientCA
- value: front-proxy-ca.crt
- - key: authConfig.requestHeader.clientCommonNames
- value: [aggregator-front-proxy]
- - key: authConfig.requestHeader.usernameHeaders
- value: [X-Remote-User]
- - key: authConfig.requestHeader.groupHeaders
- value: [X-Remote-Group]
- - key: authConfig.requestHeader.extraHeaderPrefixes
- value: [X-Remote-Extra-]
- - key: assetConfig.extensionScripts
- value: [/etc/origin/master/openshift-ansible-catalog-console.js]
- - key: kubernetesMasterConfig.apiServerArguments.runtime-config
- value: [apis/settings.k8s.io/v1alpha1=true]
- - key: admissionConfig.pluginConfig.PodPreset.configuration.kind
- value: DefaultAdmissionConfig
- - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion
- value: v1
- - key: admissionConfig.pluginConfig.PodPreset.configuration.disable
- value: false
- register: yedit_output
-
-# Restart master services serially here (the enclosing play runs serial: 1)
-- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when:
- - yedit_output.changed
- - openshift.master.cluster_method == 'native'
-
-- name: restart master controllers
- systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
- when:
- - yedit_output.changed
- - openshift.master.cluster_method == 'native'
-
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
- when:
- - yedit_output.changed
-
-- name: Delete temp directory
- file:
- name: "{{ certtemp.stdout }}"
- state: absent
- changed_when: False
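
For reference, the yedit edits above leave master-config.yaml with roughly the following aggregator-related stanzas; this is a reconstruction from the keys being set, not a verbatim config file:

    aggregatorConfig:
      proxyClientInfo:
        certFile: aggregator-front-proxy.crt
        keyFile: aggregator-front-proxy.key
    authConfig:
      requestHeader:
        clientCA: front-proxy-ca.crt
        clientCommonNames:
        - aggregator-front-proxy
        usernameHeaders:
        - X-Remote-User
        groupHeaders:
        - X-Remote-Group
        extraHeaderPrefixes:
        - X-Remote-Extra-
    assetConfig:
      extensionScripts:
      - /etc/origin/master/openshift-ansible-catalog-console.js
    kubernetesMasterConfig:
      apiServerArguments:
        runtime-config:
        - apis/settings.k8s.io/v1alpha1=true
    admissionConfig:
      pluginConfig:
        PodPreset:
          configuration:
            kind: DefaultAdmissionConfig
            apiVersion: v1
            disable: false
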
diff --git a/playbooks/common/openshift-master/validate_restart.yml b/playbooks/common/openshift-master/validate_restart.yml
deleted file mode 100644
index 5dbb21502..000000000
--- a/playbooks/common/openshift-master/validate_restart.yml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-- name: Validate configuration for rolling restart
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - fail:
- msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'"
- when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"]
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
- - role: master
- local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
-
-# Create a temp file on localhost, then check each system that will be
-# rebooted for that file. If the file exists, Ansible is running on a
-# machine that is about to be rebooted, and the user must be warned first.
-- name: Create temp file on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - local_action: command mktemp
- register: mktemp
- changed_when: false
-
-- name: Check if temp file exists on any masters
- hosts: oo_masters_to_config
- tasks:
- - stat: path="{{ hostvars.localhost.mktemp.stdout }}"
- register: exists
- changed_when: false
-
-- name: Cleanup temp file on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
- changed_when: false
-
-- name: Warn if restarting the system where ansible is running
- hosts: oo_masters_to_config
- tasks:
- - pause:
- prompt: >
- Warning: Running playbook from a host that will be restarted!
- Press CTRL+C and A to abort playbook execution. You may
- continue by pressing ENTER but the playbook will stop
- executing after this system has been restarted and services
- must be verified manually. To only restart services, set
- openshift_rolling_restart_mode=services in host
- inventory and relaunch the playbook.
- when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
- - set_fact:
- current_host: "{{ exists.stat.exists }}"
- when: openshift.common.rolling_restart_mode == 'system'
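
The temp-file check above generalizes to any play that must detect whether Ansible is running on one of its own targets: create a unique file on the control host, then stat it from each target; only the target that shares the control host's filesystem will see it. A condensed sketch (marker cleanup omitted for brevity):

    - hosts: localhost
      connection: local
      gather_facts: no
      tasks:
      - command: mktemp
        register: marker
        changed_when: false

    - hosts: all
      tasks:
      # Only the host Ansible itself runs on can see the control
      # host's filesystem, so an existing marker identifies "self".
      - stat:
          path: "{{ hostvars.localhost.marker.stdout }}"
        register: marker_stat
        changed_when: false
      - debug:
          msg: "this target is the control host"
        when: marker_stat.stat.exists
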
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
deleted file mode 100644
index ce672daf5..000000000
--- a/playbooks/common/openshift-nfs/config.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: NFS Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set NFS install 'In Progress'
- set_stats:
- data:
- installer_phase_nfs: "In Progress"
- aggregate: false
-
-- name: Configure nfs
- hosts: oo_nfs_to_config
- roles:
- - role: os_firewall
- - role: openshift_storage_nfs
-
-- name: NFS Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set NFS install 'Complete'
- set_stats:
- data:
- installer_phase_nfs: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-nfs/filter_plugins b/playbooks/common/openshift-nfs/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-nfs/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-nfs/lookup_plugins b/playbooks/common/openshift-nfs/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-nfs/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-nfs/roles b/playbooks/common/openshift-nfs/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/common/openshift-nfs/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml
deleted file mode 100644
index ac757397b..000000000
--- a/playbooks/common/openshift-node/additional_config.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: create additional node network plugin groups
- hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}"
- tasks:
- # Creating these node groups up front avoids a large number of skipped tasks.
- # Create group for flannel nodes
- - group_by:
- key: oo_nodes_use_{{ (openshift_use_flannel | default(False)) | ternary('flannel','nothing') }}
- changed_when: False
- # Create group for calico nodes
- - group_by:
- key: oo_nodes_use_{{ (openshift_use_calico | default(False)) | ternary('calico','nothing') }}
- changed_when: False
- # Create group for nuage nodes
- - group_by:
- key: oo_nodes_use_{{ (openshift_use_nuage | default(False)) | ternary('nuage','nothing') }}
- changed_when: False
- # Create group for contiv nodes
- - group_by:
- key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }}
- changed_when: False
- # Create group for kuryr nodes
- - group_by:
- key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }}
- changed_when: False
-
-- include: etcd_client_config.yml
- vars:
- openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr"
-
-- name: Additional node config
- hosts: oo_nodes_use_flannel
- roles:
- - role: flannel
- etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- when: openshift_use_flannel | default(false) | bool
-
-- name: Additional node config
- hosts: oo_nodes_use_calico
- roles:
- - role: calico
- when: openshift_use_calico | default(false) | bool
-
-- name: Additional node config
- hosts: oo_nodes_use_nuage
- roles:
- - role: nuage_node
- when: openshift_use_nuage | default(false) | bool
-
-- name: Additional node config
- hosts: oo_nodes_use_contiv
- roles:
- - role: contiv
- contiv_role: netplugin
- when: openshift_use_contiv | default(false) | bool
-
-- name: Configure Kuryr node
- hosts: oo_nodes_use_kuryr
- tasks:
- - include_role:
- name: kuryr
- tasks_from: node
- when: openshift_use_kuryr | default(false) | bool
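
The group_by/ternary pairing at the top of this file routes hosts into per-plugin groups in a single pass: hosts with a feature disabled fall into a throwaway oo_nodes_use_nothing group, so later plays that target the real groups simply match no hosts. A minimal sketch with a hypothetical feature flag:

    - hosts: all
      tasks:
      # use_widget defaults to false; hosts enabling it join
      # oo_nodes_use_widget, everyone else joins oo_nodes_use_nothing.
      - group_by:
          key: oo_nodes_use_{{ (use_widget | default(False)) | ternary('widget', 'nothing') }}
        changed_when: False

    - hosts: oo_nodes_use_widget
      tasks:
      - debug:
          msg: "widget support is enabled here"
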
diff --git a/playbooks/common/openshift-node/certificates.yml b/playbooks/common/openshift-node/certificates.yml
deleted file mode 100644
index 908885ee6..000000000
--- a/playbooks/common/openshift-node/certificates.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Create OpenShift certificates for node hosts
- hosts: oo_nodes_to_config
- gather_facts: no
- roles:
- - role: openshift_node_certificates
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- when: not openshift_node_bootstrap | default(false) | bool
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
deleted file mode 100644
index 4f8f98aef..000000000
--- a/playbooks/common/openshift-node/config.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: Node Install Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Node install 'In Progress'
- set_stats:
- data:
- installer_phase_node: "In Progress"
- aggregate: false
-
-- include: certificates.yml
-
-- include: setup.yml
-
-- include: containerized_nodes.yml
-
-- include: configure_nodes.yml
-
-- include: additional_config.yml
-
-- include: manage_node.yml
-
-- include: enable_excluders.yml
-
-- name: Node Install Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set Node install 'Complete'
- set_stats:
- data:
- installer_phase_node: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/common/openshift-node/configure_nodes.yml
deleted file mode 100644
index 17259422d..000000000
--- a/playbooks/common/openshift-node/configure_nodes.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Configure nodes
- hosts: oo_nodes_to_config:!oo_containerized_master_nodes
- vars:
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- roles:
- - role: os_firewall
- - role: openshift_node
- - role: tuned
- - role: nickhammond.logrotate
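
oo_select_keys and oo_collect are filter plugins shipped in this repository (via the filter_plugins symlinks removed above): the first trims hostvars down to the named hosts, the second collects one attribute from each entry. For illustration only, a rough equivalent of the no_proxy expression using stock Jinja2 filters; this is a sketch, not what the playbook actually runs:

    openshift_no_proxy_internal_hostnames: >-
      {{ (groups['oo_nodes_to_config']
          | union(groups['oo_masters_to_config'])
          | union(groups['oo_etcd_to_config'] | default([])))
         | map('extract', hostvars, ['openshift', 'common', 'hostname'])
         | list | join(',') }}
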
diff --git a/playbooks/common/openshift-node/containerized_nodes.yml b/playbooks/common/openshift-node/containerized_nodes.yml
deleted file mode 100644
index 6fac937e3..000000000
--- a/playbooks/common/openshift-node/containerized_nodes.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Configure containerized nodes
- hosts: oo_containerized_master_nodes
- serial: 1
- vars:
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
-
- roles:
- - role: os_firewall
- - role: openshift_node
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- - role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-node/enable_excluders.yml b/playbooks/common/openshift-node/enable_excluders.yml
deleted file mode 100644
index 5288b14f9..000000000
--- a/playbooks/common/openshift-node/enable_excluders.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_nodes_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-node/etcd_client_config.yml b/playbooks/common/openshift-node/etcd_client_config.yml
deleted file mode 100644
index c3fa38a81..000000000
--- a/playbooks/common/openshift-node/etcd_client_config.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: etcd_client node config
- hosts: "{{ openshift_node_scale_up_group | default('this_group_does_not_exist') }}"
- roles:
- - role: openshift_facts
- - role: openshift_etcd_facts
- - role: openshift_etcd_client_certificates
- etcd_cert_prefix: flannel.etcd-
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
diff --git a/playbooks/common/openshift-node/filter_plugins b/playbooks/common/openshift-node/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-node/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
deleted file mode 100644
index 00d167c22..000000000
--- a/playbooks/common/openshift-node/image_prep.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: normalize groups
- include: ../../byo/openshift-cluster/initialize_groups.yml
-
-- name: evaluate the groups
- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: initialize the facts
- include: ../openshift-cluster/initialize_facts.yml
-
-- name: initialize the repositories
- include: ../openshift-cluster/initialize_openshift_repos.yml
-
-- name: run node config setup
- include: setup.yml
-
-- name: run node config
- include: configure_nodes.yml
-
-- name: Re-enable excluders
- include: enable_excluders.yml
diff --git a/playbooks/common/openshift-node/lookup_plugins b/playbooks/common/openshift-node/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-node/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-node/manage_node.yml b/playbooks/common/openshift-node/manage_node.yml
deleted file mode 100644
index f48a19a9c..000000000
--- a/playbooks/common/openshift-node/manage_node.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Additional node config
- hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}"
- vars:
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- roles:
- - role: openshift_manage_node
- openshift_master_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Create group for deployment type
- group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
- changed_when: False
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml
deleted file mode 100644
index b3a7399dc..000000000
--- a/playbooks/common/openshift-node/network_manager.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Install and configure NetworkManager
- hosts: oo_all_hosts
- become: yes
- tasks:
- - name: install NetworkManager
- package:
- name: 'NetworkManager'
- state: present
-
- - name: configure NetworkManager
- lineinfile:
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
- regexp: '^{{ item }}='
- line: '{{ item }}=yes'
- state: present
- create: yes
- with_items:
- - 'USE_PEERDNS'
- - 'NM_CONTROLLED'
-
- - name: enable and start NetworkManager
- service:
- name: 'NetworkManager'
- state: started
- enabled: yes
diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml
deleted file mode 100644
index c3beb59b7..000000000
--- a/playbooks/common/openshift-node/restart.yml
+++ /dev/null
@@ -1,61 +0,0 @@
----
-- name: Restart nodes
- hosts: oo_nodes_to_config
- serial: "{{ openshift_restart_nodes_serial | default(1) }}"
-
- roles:
- - lib_openshift
-
- tasks:
- - name: Restart docker
- service:
- name: docker
- state: restarted
- register: l_docker_restart_docker_in_node_result
- until: not l_docker_restart_docker_in_node_result | failed
- retries: 3
- delay: 30
-
- - name: Update docker facts
- openshift_facts:
- role: docker
-
- - name: Restart containerized services
- service:
- name: "{{ item }}"
- state: started
- with_items:
- - etcd_container
- - openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- failed_when: false
- when: openshift.common.is_containerized | bool
-
- - name: Wait for master API to come back online
- wait_for:
- host: "{{ openshift.common.hostname }}"
- state: started
- delay: 10
- port: "{{ openshift.master.api_port }}"
- timeout: 600
- when: inventory_hostname in groups.oo_masters_to_config
-
- - name: restart node
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: restarted
-
- - name: Wait for node to be ready
- oc_obj:
- state: list
- kind: node
- name: "{{ openshift.common.hostname | lower }}"
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config
- until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
- # Give the node two minutes to come back online.
- retries: 24
- delay: 5
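
For context, the until: expression on the final task walks a Kubernetes Node object returned by oc_obj; trimmed to just the fields the expression reads, that data looks roughly like this (condition entries besides Ready are illustrative):

    results:
      returncode: 0
      results:
      - status:
          conditions:
          - type: OutOfDisk
            status: "False"
          - type: Ready
            status: "True"   # selectattr('type', 'match', '^Ready$') keeps this entry

selectattr filters the conditions list down to the Ready entry, map/join extracts its status string, and | bool turns "True" into a boolean for the retry loop.
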
diff --git a/playbooks/common/openshift-node/roles b/playbooks/common/openshift-node/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/common/openshift-node/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-node/setup.yml b/playbooks/common/openshift-node/setup.yml
deleted file mode 100644
index 794c03a67..000000000
--- a/playbooks/common/openshift-node/setup.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Disable excluders
- hosts: oo_nodes_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
-- name: Evaluate node groups
- hosts: localhost
- become: no
- connection: local
- tasks:
- - name: Evaluate oo_containerized_master_nodes
- add_host:
- name: "{{ item }}"
- groups: oo_containerized_master_nodes
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
- when:
- - hostvars[item].openshift is defined
- - hostvars[item].openshift.common is defined
- - hostvars[item].openshift.common.is_containerized | bool
- - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
- changed_when: False
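
The add_host loop above assembles the oo_containerized_master_nodes group at runtime from hosts that are both nodes and masters and report is_containerized. A minimal sketch of the same runtime-group technique, with a hypothetical fact and group name:

    - hosts: localhost
      connection: local
      gather_facts: no
      tasks:
      # add_host mutates only the in-memory inventory; the group
      # exists for the rest of this playbook run.
      - add_host:
          name: "{{ item }}"
          groups: oo_special_nodes    # hypothetical group
        with_items: "{{ groups.all | default([]) }}"
        when: hostvars[item].special_flag | default(false) | bool
        changed_when: False

    - hosts: oo_special_nodes
      tasks:
      - debug:
          msg: "selected at runtime"
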