Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-checks/adhoc.yml | 5
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 6
-rw-r--r--  playbooks/common/openshift-checks/install.yml | 51
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/cockpit-ui.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 23
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/install_docker_gc.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/openshift_default_storage_class.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 23
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/openshift_management.yml | 25
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/registry.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 28
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 38
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 147
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml | 173
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 66
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml | 20
l---------  playbooks/common/openshift-cluster/upgrades/v3_3/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 119
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 113
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml | 16
l---------  playbooks/common/openshift-cluster/upgrades/v3_4/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 116
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 119
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml | 67
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 8
l---------  playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins (renamed from playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml) | 19
l---------  playbooks/common/openshift-cluster/upgrades/v3_8/roles (renamed from playbooks/common/openshift-cluster/upgrades/v3_5/roles) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml) | 28
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml) | 29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml) | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml | 7
-rw-r--r--  playbooks/common/openshift-etcd/certificates.yml | 29
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 18
-rw-r--r--  playbooks/common/openshift-etcd/embedded2external.yml | 172
-rw-r--r--  playbooks/common/openshift-etcd/master_etcd_certificates.yml | 14
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 24
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 4
-rw-r--r--  playbooks/common/openshift-etcd/server_certificates.yml | 15
-rw-r--r--  playbooks/common/openshift-glusterfs/config.yml | 28
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 29
-rw-r--r--  playbooks/common/openshift-management/add_container_provider.yml | 8
-rw-r--r--  playbooks/common/openshift-management/config.yml | 24
-rw-r--r--  playbooks/common/openshift-management/uninstall.yml | 2
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml | 24
-rw-r--r--  playbooks/common/openshift-master/config.yml | 31
-rw-r--r--  playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js | 2
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 24
-rw-r--r--  playbooks/common/openshift-master/revert-client-ca.yml | 17
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 11
-rw-r--r--  playbooks/common/openshift-master/tasks/wire_aggregator.yml | 29
-rw-r--r--  playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js | 1
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 18
-rw-r--r--  playbooks/common/openshift-node/additional_config.yml | 14
-rw-r--r--  playbooks/common/openshift-node/clean_image.yml | 10
-rw-r--r--  playbooks/common/openshift-node/config.yml | 18
-rw-r--r--  playbooks/common/openshift-node/configure_nodes.yml | 1
-rw-r--r--  playbooks/common/openshift-node/image_prep.yml | 9
83 files changed, 892 insertions(+), 1498 deletions(-)
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml
index dfcef8435..d0deaeb65 100644
--- a/playbooks/common/openshift-checks/adhoc.yml
+++ b/playbooks/common/openshift-checks/adhoc.yml
@@ -1,12 +1,13 @@
---
-- name: OpenShift health checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: adhoc
post_tasks:
- - name: Run health checks
+ - name: Run health checks (adhoc)
action: openshift_health_check
args:
checks: '{{ openshift_checks | default([]) }}'
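
Note: the adhoc context above leaves the check list entirely to the operator via the openshift_checks variable (typically passed as an extra var on the command line). A minimal sketch of supplying it, using check names exercised elsewhere in this diff:

    # e.g. ansible-playbook <adhoc checks entry point> \
    #   -e 'openshift_checks=disk_availability,memory_availability'
    openshift_checks:
      - disk_availability
      - memory_availability
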
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 21ea785ef..d0921b9d3 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,11 +1,13 @@
---
-- name: Run OpenShift health checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: health
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (@health)
+ action: openshift_health_check
args:
checks: ['@health']
diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/common/openshift-checks/install.yml
new file mode 100644
index 000000000..93cf6c359
--- /dev/null
+++ b/playbooks/common/openshift-checks/install.yml
@@ -0,0 +1,51 @@
+---
+- name: Health Check Checkpoint Start
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_health:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- name: OpenShift Health Checks
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: install
+ post_tasks:
+ - name: Run health checks (install) - EL
+ when: ansible_distribution != "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
+ - name: Run health checks (install) - Fedora
+ when: ansible_distribution == "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - docker_image_availability
+
+- name: Health Check Checkpoint End
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_health:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
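
Note: the checkpoint plays above follow the structured set_stats convention introduced throughout this diff: instead of a flat "In Progress"/"Complete" string, each installer_phase_* key now records a status plus start/end timestamps, set once per run (hosts: all with run_once: true) so the installer_checkpoint callback can summarize phase timing. A sketch of the accumulated stats for this phase after a successful run (timestamps illustrative):

    installer_phase_health:
      status: "Complete"
      start: "20171004195721Z"
      end: "20171004195823Z"
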
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index 88e6f9120..32449d4e4 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,11 +1,13 @@
---
-- name: run OpenShift pre-install checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: pre-install
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (@preflight)
+ action: openshift_health_check
args:
checks: ['@preflight']
diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml
index 5ddafdb07..359132dd0 100644
--- a/playbooks/common/openshift-cluster/cockpit-ui.yml
+++ b/playbooks/common/openshift-cluster/cockpit-ui.yml
@@ -3,4 +3,6 @@
hosts: oo_first_master
roles:
- role: cockpit-ui
- when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+ when:
+ - openshift_hosted_manage_registry | default(true) | bool
+ - not openshift.docker.hosted_registry_insecure | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index b399ea995..3b4d6f9a6 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,22 +1,5 @@
---
-# TODO: refactor this into its own include
-# and pass a variable for ctx
-- name: Verify Requirements
- hosts: oo_all_hosts
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: install
- post_tasks:
- - action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - package_availability
- - package_version
- - docker_image_availability
- - docker_storage
+- include: ../openshift-checks/install.yml
- include: ../openshift-etcd/config.yml
@@ -44,9 +27,9 @@
when: openshift_logging_install_logging | default(false) | bool
- include: service_catalog.yml
- when: openshift_enable_service_catalog | default(false) | bool
+ when: openshift_enable_service_catalog | default(true) | bool
-- include: openshift_management.yml
+- include: ../openshift-management/config.yml
when: openshift_management_install_management | default(false) | bool
- name: Print deprecated variable warning message if necessary
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index be14b06f0..fe765aa5d 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -5,9 +5,6 @@
hosts: oo_masters_to_config:oo_nodes_to_config
roles:
- openshift_facts
- post_tasks:
- - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1"
- when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool
- name: Reconfigure masters to listen on our new dns_port
hosts: oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index e55b2f964..78b552279 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -51,7 +51,7 @@
when:
- g_etcd_hosts | default([]) | length not in [3,1]
- not openshift_master_unsupported_embedded_etcd | default(False)
- - not openshift_node_bootstrap | default(False)
+ - not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
add_host:
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index be2f8b5f4..91223d368 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -10,6 +10,7 @@
- name: load openshift_facts module
include_role:
name: openshift_facts
+ static: yes
# TODO: Should this role be refactored into health_checks??
- name: Run openshift_sanitize_inventory to set variables
@@ -145,7 +146,19 @@
https_proxy: "{{ openshift_https_proxy | default(None) }}"
no_proxy: "{{ openshift_no_proxy | default(None) }}"
generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
- no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+
+ - name: Set fact of no_proxy_internal_hostnames
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- name: initialize_facts set_fact repoquery command
set_fact:
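
Note: the new no_proxy_internal_hostnames fact above is built by selecting the hostvars of every configured node, master, and etcd host, collecting each host's openshift.common.hostname, and joining the results. With a hypothetical inventory of one master, two nodes, and no separate etcd hosts, the expression would evaluate to something like:

    # hostnames are invented for illustration
    no_proxy_internal_hostnames: "node1.example.com,node2.example.com,master1.example.com"
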
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index e6400ea61..37a5284d5 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,15 +1,4 @@
---
-# openshift_install_base_package_group may be set in a play variable to limit
-# the host groups the base package is installed on. This is currently used
-# for master/control-plane upgrades.
-- name: Set version_install_base_package true on masters and nodes
- hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}"
- tasks:
- - name: Set version_install_base_package true
- set_fact:
- version_install_base_package: True
- when: version_install_base_package is not defined
-
# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/install_docker_gc.yml b/playbooks/common/openshift-cluster/install_docker_gc.yml
new file mode 100644
index 000000000..1e3dfee07
--- /dev/null
+++ b/playbooks/common/openshift-cluster/install_docker_gc.yml
@@ -0,0 +1,7 @@
+---
+- name: Install docker gc
+ hosts: oo_first_master
+ gather_facts: false
+ tasks:
+ - include_role:
+ name: openshift_docker_gc
diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
index 4b4f19690..62fe0dd60 100644
--- a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
+++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
@@ -3,4 +3,4 @@
hosts: oo_first_master
roles:
- role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 32e5e708a..15ee60dc0 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,14 +1,15 @@
---
- name: Hosted Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Hosted install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_hosted: "In Progress"
- aggregate: false
+ installer_phase_hosted:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- include: create_persistent_volumes.yml
@@ -25,13 +26,19 @@
- include: openshift_prometheus.yml
when: openshift_hosted_prometheus_deploy | default(False) | bool
+- include: install_docker_gc.yml
+ when:
+ - openshift_use_crio | default(False) | bool
+ - openshift_crio_enable_docker_gc | default(False) | bool
+
- name: Hosted Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Hosted install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_hosted: "Complete"
- aggregate: false
+ installer_phase_hosted:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 69f50fbcd..bc59bd95a 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,14 +1,15 @@
---
- name: Logging Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Logging install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_logging: "In Progress"
- aggregate: false
+ installer_phase_logging:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: OpenShift Aggregated Logging
hosts: oo_first_master
@@ -24,12 +25,13 @@
tasks_from: update_master_config
- name: Logging Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Logging install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_logging: "Complete"
- aggregate: false
+ installer_phase_logging:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/openshift_management.yml b/playbooks/common/openshift-cluster/openshift_management.yml
deleted file mode 100644
index 6e582920b..000000000
--- a/playbooks/common/openshift-cluster/openshift_management.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Management Install Checkpoint Start
- hosts: localhost
- connection: local
- gather_facts: false
- tasks:
- - name: Set Management install 'In Progress'
- set_stats:
- data:
- installer_phase_Management: "In Progress"
- aggregate: false
-
-- name: Management
- include: ../openshift-management/config.yml
-
-- name: Management Install Checkpoint End
- hosts: localhost
- connection: local
- gather_facts: false
- tasks:
- - name: Set Management install 'Complete'
- set_stats:
- data:
- installer_phase_Management: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index e369dcd86..80cd93e5f 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,14 +1,15 @@
---
- name: Metrics Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Metrics install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_metrics: "In Progress"
- aggregate: false
+ installer_phase_metrics:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: OpenShift Metrics
hosts: oo_first_master
@@ -25,12 +26,13 @@
tasks_from: update_master_config.yaml
- name: Metrics Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Metrics install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_metrics: "Complete"
- aggregate: false
+ installer_phase_metrics:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
index ac2d250a3..7aa9a16e6 100644
--- a/playbooks/common/openshift-cluster/openshift_prometheus.yml
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -1,5 +1,29 @@
---
+- name: Prometheus Install Checkpoint Start
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Prometheus install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_prometheus:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
- name: Create Hosted Resources - openshift_prometheus
hosts: oo_first_master
roles:
- role: openshift_prometheus
+
+- name: Prometheus Install Checkpoint End
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Prometheus install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_prometheus:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 2068ed199..eb225dfb5 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -1,11 +1,4 @@
---
-- name: Verify OpenShift version is greater than or equal to 1.2 or 3.2
- hosts: oo_first_master
- tasks:
- - fail:
- msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles."
- when: not openshift.common.version_gte_3_2_or_1_2 | bool
-
- name: Check cert expirys
hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config
vars:
@@ -43,11 +36,6 @@
when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt'
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: servingInfo.clientCA
- yaml_value: ca.crt
- when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: etcdClientInfo.ca
yaml_value: ca-bundle.crt
when:
@@ -67,6 +55,13 @@
when:
- groups.oo_etcd_to_config | default([]) | length == 0
- (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
+ # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate.
+ # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml
+ - modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: client-ca-bundle.crt
+ when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'client-ca-bundle.crt'
- name: Copy current OpenShift CA to legacy directory
hosts: oo_masters_to_config
@@ -155,6 +150,7 @@
- ca.key
- ca-bundle.crt
- ca.serial.txt
+ - client-ca-bundle.crt
delegate_to: "{{ openshift_ca_host }}"
run_once: true
changed_when: false
@@ -173,6 +169,7 @@
- ca.key
- ca-bundle.crt
- ca.serial.txt
+ - client-ca-bundle.crt
- name: Update master client kubeconfig CA data
kubeclient_ca:
client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index afd5463b2..7e9363c5f 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -70,9 +70,7 @@
--hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift.common.config_base }}/master/registry.crt
--key={{ openshift.common.config_base }}/master/registry.key
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
--expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }}
- {% endif %}
- name: Update registry certificates secret
oc_secret:
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
index 95a8f601c..7bb8511f6 100644
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -1,14 +1,15 @@
---
- name: Service Catalog Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Service Catalog install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_servicecatalog: "In Progress"
- aggregate: false
+ installer_phase_servicecatalog:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Service Catalog
hosts: oo_first_master
@@ -20,12 +21,13 @@
first_master: "{{ groups.oo_first_master[0] }}"
- name: Service Catalog Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Service Catalog install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_servicecatalog: "Complete"
- aggregate: false
+ installer_phase_servicecatalog:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 090ad6445..fe376fe31 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,16 +1,17 @@
---
- name: Initialization Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
roles:
- installer_checkpoint
tasks:
- name: Set install initialization 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_initialize: "In Progress"
- aggregate: false
+ installer_phase_initialize:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- include: evaluate_groups.yml
tags:
@@ -37,12 +38,13 @@
- always
- name: Initialization Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set install initialization 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_initialize: "Complete"
- aggregate: false
+ installer_phase_initialize:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
new file mode 100644
index 000000000..9c9c260fb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
@@ -0,0 +1,37 @@
+---
+apiVersion: v1
+kind: Role
+metadata:
+ name: shared-resource-viewer
+ namespace: openshift
+rules:
+- apiGroups:
+ - ""
+ - template.openshift.io
+ attributeRestrictions: null
+ resources:
+ - templates
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreamimages
+ - imagestreams
+ - imagestreamtags
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreams/layers
+ verbs:
+ - get
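
Note: this file is consumed later in the diff by upgrade_control_plane.yml, which pushes it into the openshift namespace via oc_obj, but deliberately skips the fixup when the existing role is marked protected. As a sketch, a role that would be skipped carries the annotation checked there in its metadata:

    apiVersion: v1
    kind: Role
    metadata:
      name: shared-resource-viewer
      namespace: openshift
      annotations:
        openshift.io/reconcile-protect: "true"
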
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 2826951e6..6ad0b6b86 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -9,7 +9,12 @@
- name: Ensure firewall is not switched during upgrade
hosts: oo_all_hosts
+ vars:
+ openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
tasks:
+ - name: set currently installed version
+ set_fact:
+ openshift_currently_installed_version: "{{ openshift_master_installed_version }}"
- name: Check if iptables is running
command: systemctl status iptables
changed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 72de63070..fc1cbf32a 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -30,6 +30,7 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: " {{ groups['oo_nodes_to_config'] }}"
when:
+ - hostvars[item].openshift is defined
- hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 07e521a89..c634e0ab8 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -85,6 +85,8 @@
roles:
- openshift_manageiq
+ - role: openshift_project_request_template
+ when: openshift_project_request_template_manage
# Create the new templates shipped in 3.2, existing templates are left
# unmodified. This prevents the subsequent role definition for
# openshift_examples from failing when trying to replace templates that do
@@ -103,14 +105,20 @@
openshift_hosted_templates_import_command: replace
# Check for warnings to be printed at the end of the upgrade:
-- name: Check for warnings
+- name: Clean up and display warnings
hosts: oo_masters_to_config
- tasks:
+ tags:
+ - always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
register: grep_plugin_order_override
- when: openshift.common.version_gte_3_3_or_1_3 | bool
changed_when: false
failed_when: false
@@ -121,12 +129,8 @@
- not grep_plugin_order_override | skipped
- grep_plugin_order_override.rc == 0
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ - name: Warn if shared-resource-viewer could not be updated
+ debug:
+ msg: "WARNING the shared-resource-viewer role could not be upgraded to 3.6 spec because it's marked protected, please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213"
+ when:
+ - __shared_resource_viewer_protected | default(false)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 45022cd61..6a5bc24f7 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -9,16 +9,29 @@
local_facts:
ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
+ - when: openshift.common.is_containerized | bool
+ block:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type }}-master"
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
+ # In case of the non-ha to ha upgrade.
+ - name: Check if the {{ openshift.common.service_type }}-master-api.service exists
+ command: >
+ systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend
+ register: master_api_service_status
+
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ when:
+ - master_api_service_status.stdout_lines | length > 0
+ - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items: "{{ master_services }}"
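
Note: the systemctl list-units probe above prints nothing when the unit does not exist, so a non-empty result whose first line names the -master-api unit signals an HA (split api/controllers) layout; otherwise the single combined master service is assumed. With --no-legend the output is the bare unit table; an illustrative line, assuming service_type 'origin' (description text invented):

    origin-master-api.service loaded active running OpenShift Master API
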
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
index ad6325ca0..2a8de50a2 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -1,12 +1,14 @@
---
-- name: Verify Host Requirements
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+ any_errors_fatal: true
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: upgrade
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
args:
checks:
- disk_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 142ce5f3d..13fa37b09 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -4,6 +4,12 @@
msg: Verify OpenShift is already installed
when: openshift.common.version is not defined
+- name: Update oreg_auth docker login credentials if necessary
+ include_role:
+ name: docker
+ tasks_from: registry_auth.yml
+ when: oreg_auth_user is defined
+
- name: Verify containers are available for upgrade
command: >
docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
@@ -37,7 +43,7 @@
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<')
+ - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
fail:
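
Note: the revised guard above accounts for openshift_pkg_version carrying a leading dash, since the variable is normally appended verbatim to package names (e.g. origin-3.7.0); comparing the raw value against openshift_release would always be wrong. A sketch of the evaluation with illustrative values:

    # openshift_pkg_version: "-3.7.0"
    # (openshift_pkg_version | default('-0.0', True)).split('-')[1]  ->  "3.7.0"
    # "3.7.0" | version_compare("3.7", '<')  ->  false, so this condition does not trip the fail
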
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
deleted file mode 100644
index 8cc46ab68..000000000
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-# When we update package "a-${version}" and a requires b >= ${version} if we
-# don't specify the version of b yum will choose the latest version of b
-# available and the whole set of dependencies end up at the latest version.
-# Since the package module, unlike the yum module, doesn't flatten a list
-# of packages into one transaction we need to do that explicitly. The ansible
-# core team tells us not to rely on yum module transaction flattening anyway.
-
-# TODO: If the sdn package isn't already installed this will install it, we
-# should fix that
-- name: Upgrade master packages
- package: name={{ master_pkgs | join(',') }} state=present
- vars:
- master_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - PyYAML
- when:
- - component == "master"
- - not openshift.common.is_atomic | bool
-
-- name: Upgrade node packages
- package: name={{ node_pkgs | join(',') }} state=present
- vars:
- node_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - PyYAML
- when:
- - component == "node"
- - not openshift.common.is_atomic | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index da47491c1..399b818a7 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -3,22 +3,6 @@
# Upgrade Masters
###############################################################################
-# oc adm migrate storage should be run prior to etcd v3 upgrade
-# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
-- name: Pre master upgrade - Upgrade all storage
- hosts: oo_first_master
- tasks:
- - name: Upgrade all storage
- command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=* --confirm
- register: l_pb_upgrade_control_plane_pre_upgrade_storage
- when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
- failed_when:
- - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
- - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
- - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
-
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
# so we must first make sure this is set correctly before attempting the backup.
@@ -31,7 +15,6 @@
role: master
local_facts:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
- name: Upgrade and backup etcd
include: ./etcd/main.yml
@@ -49,6 +32,22 @@
- include: create_service_signer_cert.yml
+# oc adm migrate storage should be run prior to etcd v3 upgrade
+# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
+- name: Pre master upgrade - Upgrade all storage
+ hosts: oo_first_master
+ tasks:
+ - name: Upgrade all storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ register: l_pb_upgrade_control_plane_pre_upgrade_storage
+ when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
+ - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
+
# Set openshift_master_facts separately. In order to reconcile
# admission_config's, we currently must run openshift_master_facts and
# then run openshift_facts.
@@ -64,13 +63,9 @@
vars:
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
- handlers:
- - include: ../../../../roles/openshift_master/handlers/main.yml
- static: yes
- roles:
- - openshift_facts
- - lib_utils
- post_tasks:
+ tasks:
+ - include_role:
+ name: openshift_facts
# Run the pre-upgrade hook if defined:
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
@@ -79,52 +74,9 @@
- include: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: rpm_upgrade.yml component=master
- when: not openshift.common.is_containerized | bool
-
- - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
-
- - include: upgrade_scheduler.yml
-
- - include: "{{ master_config_hook }}"
- when: master_config_hook is defined
-
- - include_vars: ../../../../roles/openshift_master/vars/main.yml
-
- - name: Remove any legacy systemd units and update systemd units
- include: ../../../../roles/openshift_master/tasks/systemd_units.yml
-
- - name: Check for ca-bundle.crt
- stat:
- path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- register: ca_bundle_stat
- failed_when: false
-
- - name: Check for ca.crt
- stat:
- path: "{{ openshift.common.config_base }}/master/ca.crt"
- register: ca_crt_stat
- failed_when: false
-
- - name: Migrate ca.crt to ca-bundle.crt
- command: mv ca.crt ca-bundle.crt
- args:
- chdir: "{{ openshift.common.config_base }}/master"
- when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-
- - name: Link ca.crt to ca-bundle.crt
- file:
- src: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- path: "{{ openshift.common.config_base }}/master/ca.crt"
- state: link
- when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-
- - name: Update oreg value
- yedit:
- src: "{{ openshift.common.config_base }}/master/master-config.yaml"
- key: 'imageConfig.format'
- value: "{{ oreg_url | default(oreg_url_master) }}"
- when: oreg_url is defined or oreg_url_master is defined
+ - include_role:
+ name: openshift_master
+ tasks_from: upgrade.yml
# Run the upgrade hook prior to restarting services/system if defined:
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
@@ -151,7 +103,9 @@
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=clusterpolicies --confirm
register: l_pb_upgrade_control_plane_post_upgrade_storage
- when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ - openshift_version | version_compare('3.7','<')
failed_when:
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
@@ -193,13 +147,14 @@
# Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
# restart.
skip_docker_role: True
+ __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version | version_compare('3.7','<')
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -214,7 +169,7 @@
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version | version_compare('3.7','<')
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -229,7 +184,51 @@
changed_when:
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
- when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool)
+ when:
+ - openshift_version | version_compare('3.7','<')
+
+ - when: openshift_upgrade_target | version_compare('3.7','<')
+ block:
+ - name: Retrieve shared-resource-viewer
+ oc_obj:
+ state: list
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ register: objout
+
+ - name: Determine if shared-resource-viewer is protected
+ set_fact:
+ __shared_resource_viewer_protected: true
+ when:
+ - "'results' in objout"
+ - "'results' in objout['results']"
+ - "'annotations' in objout['results']['results'][0]['metadata']"
+ - "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']"
+ - "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'"
+ - copy:
+ src: "{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - "{{ __master_shared_resource_viewer_file }}"
+ when: __shared_resource_viewer_protected is not defined
+
+ - name: Fixup shared-resource-viewer role
+ oc_obj:
+ state: present
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ files:
+ - "/tmp/{{ __master_shared_resource_viewer_file }}"
+ delete_after: true
+ when: __shared_resource_viewer_protected is not defined
+ register: result
+ retries: 3
+ delay: 5
+ until: result.rc == 0
+ ignore_errors: true
+
- name: Reconcile Security Context Constraints
command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
deleted file mode 100644
index 8558bf3e9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ /dev/null
@@ -1,173 +0,0 @@
----
-# Upgrade predicates
-- vars:
- prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
- prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
- default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
- # older_predicates are the set of predicates that have previously been
- # hard-coded into openshift_facts
- older_predicates:
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: MaxEBSVolumeCount
- - name: MaxGCEPDVolumeCount
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- # older_predicates_no_region are the set of predicates that have previously
- # been hard-coded into openshift_facts, with the Region predicate removed
- older_predicates_no_region:
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: MaxEBSVolumeCount
- - name: MaxGCEPDVolumeCount
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- block:
-
- # Handle case where openshift_master_predicates is defined
- - block:
- - debug:
- msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
- when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
-
- - debug:
- msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
- when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
- when: openshift_master_scheduler_predicates | default(none) is not none
-
- # Handle cases where openshift_master_predicates is not defined
- - block:
- - debug:
- msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
- when:
- - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
- - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]
-
- - set_fact:
- openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
- when:
- - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
- - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]
-
- - set_fact:
- openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
- when:
- - openshift_master_scheduler_current_predicates != default_predicates_no_region
- - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
-
- when: openshift_master_scheduler_predicates | default(none) is none
-
-
-# Upgrade priorities
-- vars:
- prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
- prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}"
- default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}"
- # older_priorities are the set of priorities that have previously been
- # hard-coded into openshift_facts
- older_priorities:
- - - name: LeastRequestedPriority
- weight: 1
- - name: SelectorSpreadPriority
- weight: 1
- - name: Zone
- weight: 2
- argument:
- serviceAntiAffinity:
- label: zone
- # older_priorities_no_region are the set of priorities that have previously
- # been hard-coded into openshift_facts, with the Zone priority removed
- older_priorities_no_zone:
- - - name: LeastRequestedPriority
- weight: 1
- - name: SelectorSpreadPriority
- weight: 1
- block:
-
- # Handle case where openshift_master_priorities is defined
- - block:
- - debug:
- msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
- when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
-
- - debug:
- msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
- when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
- when: openshift_master_scheduler_priorities | default(none) is not none
-
- # Handle cases where openshift_master_priorities is not defined
- - block:
- - debug:
- msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
- when:
- - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
- - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]
-
- - set_fact:
- openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
- when:
- - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
- - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]
-
- - set_fact:
- openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
- when:
- - openshift_master_scheduler_current_priorities != default_priorities_no_zone
- - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]
-
- when: openshift_master_scheduler_priorities | default(none) is none
-
-
-# Update scheduler
-- vars:
- scheduler_config:
- kind: Policy
- apiVersion: v1
- predicates: "{{ openshift_upgrade_scheduler_predicates
- | default(openshift_master_scheduler_current_predicates) }}"
- priorities: "{{ openshift_upgrade_scheduler_priorities
- | default(openshift_master_scheduler_current_priorities) }}"
- block:
- - name: Update scheduler config
- copy:
- content: "{{ scheduler_config | to_nice_json }}"
- dest: "{{ openshift_master_scheduler_conf }}"
- backup: true
- when: >
- openshift_upgrade_scheduler_predicates is defined or
- openshift_upgrade_scheduler_priorities is defined
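
Note: this scheduler logic is removed here rather than lost; per the control-plane diff above, the master upgrade now runs include_role: openshift_master with tasks_from: upgrade.yml, which presumably carries the equivalent predicate/priority reconciliation. For reference, the deleted block wrote the merged policy (rendered with to_nice_json) in this shape:

    kind: Policy
    apiVersion: v1
    predicates: "{{ openshift_upgrade_scheduler_predicates | default(openshift_master_scheduler_current_predicates) }}"
    priorities: "{{ openshift_upgrade_scheduler_priorities | default(openshift_master_scheduler_current_priorities) }}"
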
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
deleted file mode 100644
index d69472fad..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst'
- yaml_value: 400
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.qps'
- yaml_value: 200
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst'
- yaml_value: 600
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps'
- yaml_value: 300
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'
- yaml_value: service-signer.key
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
deleted file mode 100644
index 89b524f14..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.burst'
- yaml_value: 40
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.qps'
- yaml_value: 20
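Likewise, the deleted node hook reduces to this node-config.yaml fragment (reconstructed from the keys and values above):

masterClientConnectionOverrides:
  acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
  contentType: application/vnd.kubernetes.protobuf
  burst: 40
  qps: 20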
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
deleted file mode 100644
index a241ef039..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
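The master_config_hook and node_config_hook variables are how each version directory injects its own config migrations into the shared upgrade playbooks. A sketch of how the shared playbook presumably consumes the hook, as a task in the masters upgrade play (assumed, not quoted from upgrade_control_plane.yml):

- include: "{{ master_config_hook }}"
  when: master_config_hook is defined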
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
deleted file mode 100644
index f64f0e003..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ /dev/null
@@ -1,119 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
deleted file mode 100644
index cee4e9087..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ /dev/null
@@ -1,113 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
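The fail/when guard in the deleted node playbook above ("Verify masters are already upgraded") is the usual pre-flight pattern; an equivalent assert-based sketch, shown for illustration only:

- name: Verify masters are already upgraded
  hosts: oo_masters_to_config
  tasks:
    - assert:
        that: openshift.common.version == openshift_version
        msg: "Masters must be upgraded to {{ openshift_version }} before nodes can be upgraded."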
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
deleted file mode 100644
index ed89dbe8d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
deleted file mode 100644
index ae217ba2e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ /dev/null
@@ -1,116 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../upgrade_nodes.yml
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
deleted file mode 100644
index 43da5b629..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ /dev/null
@@ -1,119 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
deleted file mode 100644
index ed89dbe8d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
deleted file mode 100644
index e29d0f8e6..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ /dev/null
@@ -1,111 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
deleted file mode 100644
index ae63c9ca9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-###############################################################################
-# Pre-upgrade checks for known data problems. If this playbook fails, you should
-# contact support. If you're not supported, contact users@lists.openshift.com
-#
-# oc_objectvalidator provides these two checks
-# 1 - SDN data issues; never seen in the wild, but known to be possible from code audits
-# https://github.com/openshift/origin/issues/12697
-# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934
-#
-###############################################################################
-- name: Verify 3.5 specific upgrade checks
- hosts: oo_first_master
- roles:
- - { role: lib_openshift }
- tasks:
- - name: Check for invalid namespaces and SDN errors
- oc_objectvalidator:
-
- # What's all this PetSet business about?
- #
- # 'PetSets' were ALPHA resources in Kube 1.4 (OpenShift <= 3.4). In Kube
- # 1.5 (OpenShift >= 3.5) they are no longer supported; the BETA resource
- # 'StatefulSets' replaces them. We can't migrate clients' PetSets to
- # StatefulSets. Additionally, Red Hat has never officially supported
- # these resource types. Sorry users, but if you were using unsupported
- # resources from the Kube documentation, then we can't help you at this
- # time.
- #
- # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229
- - name: Check if legacy PetSets exist
- oc_obj:
- state: list
- all_namespaces: true
- kind: petsets
- register: l_do_petsets_exist
-
- - name: Fail on unsupported resource migration 'PetSets'
- fail:
- msg: >
- PetSet objects were detected in your cluster. These are an
- Alpha feature in upstream Kubernetes 1.4 and are not supported
- by Red Hat. In Kubernetes 1.5, they are replaced by the Beta
- feature StatefulSets. Red Hat currently does not offer support
- for either PetSets or StatefulSets.
-
- Automatically migrating PetSets to StatefulSets in OpenShift
- Container Platform (OCP) 3.5 is not supported. See the
- Kubernetes "Upgrading from PetSets to StatefulSets"
- documentation for additional information:
-
- https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/
-
- PetSets MUST be removed before upgrading to OCP 3.5. Red Hat
- strongly recommends reading the above referenced documentation
- in its entirety before taking any destructive actions.
-
- If you want to simply remove all PetSets without manually
- migrating to StatefulSets, run this command as a user with
- cluster-admin privileges:
-
- $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false
- when:
- # Search did not fail, valid resource type found
- - l_do_petsets_exist.results.returncode == 0
- # Items do exist in the search results
- - l_do_petsets_exist.results.results.0['items'] | length > 0
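The two when conditions above walk the structure registered from oc_obj. The assumed shape of that result, annotated with what each condition checks (illustrative values):

# l_do_petsets_exist:
#   results:
#     returncode: 0      # search succeeded, i.e. the petsets resource type exists
#     results:
#       - items: []      # an empty item list means no PetSets; the fail task is skipped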
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
deleted file mode 100644
index ed89dbe8d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 920dc2ffc..dd109cfa9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -116,6 +116,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 27d8515dc..8ab68002d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -76,7 +76,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index f1ca1edb9..f4862e321 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -120,6 +120,22 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
- include: ../upgrade_nodes.yml
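Stopping every controller before starting any of them presumably keeps controllers running the old and new leader-election modes from holding leadership at the same time, which is why the cycle is not done serially. An optional post-cycle check could look like this sketch (the service name pattern is taken from the tasks above):

- name: Verify controllers are active again
  hosts: oo_masters_to_config
  gather_facts: no
  tasks:
    - name: Check {{ openshift.common.service_type }}-master-controllers
      command: systemctl is-active {{ openshift.common.service_type }}-master-controllers
      register: l_controllers_state
      changed_when: false
      failed_when: l_controllers_state.stdout | trim != 'active'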
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 6c4f9671b..b905d6d86 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -80,7 +80,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
@@ -128,4 +127,18 @@
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index f76fc68d1..74d0cd8ad 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -11,13 +11,15 @@
tasks:
- name: Check for invalid namespaces and SDN errors
oc_objectvalidator:
-
+ # DO NOT DISABLE THIS; YOUR UPGRADE WILL FAIL IF YOU DO SO
- name: Confirm OpenShift authorization objects are in sync
command: >
{{ openshift.common.client_binary }} adm migrate authorization
- when: not openshift.common.version_gte_3_7 | bool
+ when:
+ - openshift_currently_installed_version | version_compare('3.7','<')
+ - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool
changed_when: false
register: l_oc_result
until: l_oc_result.rc == 0
- retries: 4
+ retries: 2
delay: 15
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins
index 7de3c1dd7..7de3c1dd7 120000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml
index df59a8782..1d4d1919c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml
@@ -1,21 +1,20 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.election.lockName'
+ yaml_value: 'openshift-master-controllers'
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.election.lockName'
- yaml_value: 'openshift-master-controllers'
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
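The rewritten v3_8 hook corresponds to this master-config.yaml fragment (reconstructed from the keys and values above):

controllerConfig:
  election:
    lockName: openshift-master-controllers
  serviceServingCert:
    signer:
      certFile: service-signer.crt
      keyFile: service-signer.key
servingInfo:
  clientCA: ca.crt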
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/roles b/playbooks/common/openshift-cluster/upgrades/v3_8/roles
index 415645be6..415645be6 120000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/roles
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/roles
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index 30e719d8f..b3162bd5f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -12,8 +12,8 @@
- pre_upgrade
tasks:
- set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
# Pre-upgrade
@@ -21,6 +21,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
- name: Update repos and initialize facts on all hosts
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
tags:
@@ -47,6 +51,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
@@ -112,6 +120,22 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index e9cec9220..3df5b17b5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -21,14 +21,18 @@
- pre_upgrade
tasks:
- set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
# Pre-upgrade
- include: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
- name: Update repos on control plane hosts
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tags:
@@ -55,6 +59,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
@@ -72,7 +80,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
@@ -118,6 +125,20 @@
- include: ../upgrade_control_plane.yml
vars:
- master_config_hook: "v3_5/master_config_upgrade.yml"
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 8531e6045..f3d192ba7 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -14,8 +14,8 @@
- pre_upgrade
tasks:
- set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
# Pre-upgrade
- include: ../initialize_nodes_to_upgrade.yml
@@ -48,6 +48,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_node_excluders.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml
new file mode 100644
index 000000000..d8540abfb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.8 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - debug: msg="noop"
diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml
index 31a0f50d8..eb6b94f33 100644
--- a/playbooks/common/openshift-etcd/certificates.yml
+++ b/playbooks/common/openshift-etcd/certificates.yml
@@ -1,29 +1,4 @@
---
-- name: Create etcd server certificates for etcd hosts
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_facts
- post_tasks:
- - include_role:
- name: etcd
- tasks_from: server_certificates
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+- include: server_certificates.yml
-- name: Create etcd client certificates for master hosts
- hosts: oo_masters_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_facts
- - role: openshift_etcd_client_certificates
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+- include: master_etcd_certificates.yml
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 82539dac8..3fe483785 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -1,14 +1,15 @@
---
- name: etcd Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set etcd install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_etcd: "In Progress"
- aggregate: false
+ installer_phase_etcd:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- include: ca.yml
@@ -27,12 +28,13 @@
- role: nickhammond.logrotate
- name: etcd Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set etcd install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_etcd: "Complete"
- aggregate: false
+ installer_phase_etcd:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
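Moving the checkpoint plays from localhost to hosts: all with run_once lets set_stats record the phase against the run while still executing only once, and each phase now carries a status plus start/end timestamps instead of a bare string. The combined record for a finished phase would look roughly like this (timestamps are illustrative):

installer_phase_etcd:
  status: "Complete"
  start: "20171108123456Z"
  end: "20171108123759Z"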
diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml
new file mode 100644
index 000000000..b16b78c4f
--- /dev/null
+++ b/playbooks/common/openshift-etcd/embedded2external.yml
@@ -0,0 +1,172 @@
+---
+- name: Pre-migrate checks
+ hosts: localhost
+ tasks:
+ # Check there is only one etcd host
+ - assert:
+ that: groups.oo_etcd_to_config | default([]) | length == 1
+ msg: "[etcd] group must contain only one host"
+ # Check there is only one master
+ - assert:
+ that: groups.oo_masters_to_config | default([]) | length == 1
+ msg: "[master] group must contain only one host"
+
+# 1. stop a master
+- name: Prepare masters for etcd data migration
+ hosts: oo_first_master
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Check the master API is ready
+ include_role:
+ name: openshift_master
+ tasks_from: check_master_api_is_ready
+ - set_fact:
+ master_service: "{{ openshift.common.service_type + '-master' }}"
+ embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ - debug:
+ msg: "master service name: {{ master_service }}"
+ - name: Stop master
+ service:
+ name: "{{ master_service }}"
+ state: stopped
+ # 2. backup embedded etcd
+ # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.archive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+# 3. deploy certificates (for etcd and master)
+- include: ca.yml
+
+- include: server_certificates.yml
+
+- name: Backup etcd client certificates for master host
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_master_etcd_certificates
+
+- name: Redeploy master etcd certificates
+ include: master_etcd_certificates.yml
+ vars:
+ etcd_certificates_redeploy: "{{ true }}"
+
+# 4. deploy external etcd
+- include: ../openshift-etcd/config.yml
+
+# 5. stop external etcd
+- name: Cleanse etcd
+ hosts: oo_etcd_to_config[0]
+ gather_facts: no
+ pre_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ - include_role:
+ name: etcd
+ tasks_from: clean_data
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+
+# 6. copy the embedded etcd backup to the external host
+# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory
+- name: Copy embedded etcd backup to the external host
+ hosts: localhost
+ tasks:
+ - name: Create local temp directory for syncing etcd backup
+ local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
+ register: g_etcd_client_mktemp
+ changed_when: False
+ become: no
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.fetch
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_first_master.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_first_master[0] }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.copy
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_etcd_to_config[0] }}"
+
+ - debug:
+ msg: "etcd_backup_dest_directory: {{ g_etcd_client_mktemp.stdout }}"
+
+ - name: Delete temporary directory
+ local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
+ changed_when: False
+ become: no
+
+# 7. force new cluster from the backup
+- name: Force new etcd cluster
+ hosts: oo_etcd_to_config[0]
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup.unarchive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.force_new_cluster
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+
+# 8. re-configure master to use the external etcd
+- name: Configure master to use external etcd
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: configure_external_etcd
+ vars:
+ etcd_peer_url_scheme: "https"
+ etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}"
+ etcd_peer_port: 2379
+
+ # 9. start the master
+ - name: Start master
+ service:
+ name: "{{ master_service }}"
+ state: started
+ register: service_status
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
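The pre-migrate asserts require exactly one master and one etcd host. A minimal YAML inventory sketch that satisfies them, with hypothetical hostnames ([masters] and [etcd] are the groups from which oo_masters_to_config and oo_etcd_to_config are derived):

all:
  children:
    masters:
      hosts:
        master1.example.com:
    etcd:
      hosts:
        etcd1.example.com: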
diff --git a/playbooks/common/openshift-etcd/master_etcd_certificates.yml b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
new file mode 100644
index 000000000..0a25aac57
--- /dev/null
+++ b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
@@ -0,0 +1,14 @@
+---
+- name: Create etcd client certificates for master hosts
+ hosts: oo_masters_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index 2456ad3a8..31362f2f6 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -1,4 +1,17 @@
---
+- name: Check if the master has embedded etcd
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - fail:
+ msg: "Migration of an embedded etcd is not supported. Please, migrate the embedded etcd into an external etcd first."
+ when:
+ - groups.oo_etcd_to_config | default([]) | length == 0
+
- name: Run pre-checks
hosts: oo_etcd_to_migrate
tasks:
@@ -60,12 +73,11 @@
hosts: oo_etcd_to_migrate
gather_facts: no
pre_tasks:
- - set_fact:
- l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
- - name: Disable etcd members
- service:
- name: "{{ l_etcd_service }}"
- state: stopped
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
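The inline service-stop logic is replaced by a role entry point, so the containerized-vs-rpm service naming lives in one place. Based on the removed lines, the role's disable_etcd tasks presumably amount to something like:

# Sketch of the logic the include_role call replaces (assumed to live in
# roles/etcd/tasks/disable_etcd.yml):
- set_fact:
    l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
- name: Disable etcd members
  service:
    name: "{{ l_etcd_service }}"
    state: stopped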
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index b5ba2bbba..20061366c 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -46,7 +46,7 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_initial_cluster_state: "existing"
- initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
+ etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
etcd_ca_setup: False
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
@@ -71,7 +71,7 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config']))
+ | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
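The rename fixes the variable name the etcd role actually reads (etcd_initial_cluster). The value itself is scraped from the output of etcdctl member add; the assumed line layout that makes stdout_lines[3] the right index (hosts and member ID are illustrative):

# etcd_add_check.stdout_lines, as assumed by the [3] index above:
#   0: Added member named etcd2.example.com with ID 6d4c... to cluster
#   1: (blank line)
#   2: ETCD_NAME="etcd2.example.com"
#   3: ETCD_INITIAL_CLUSTER="etcd1=https://192.0.2.10:2380,etcd2=https://192.0.2.11:2380"
#   4: ETCD_INITIAL_CLUSTER_STATE="existing"
# The two regex_replace filters strip the ETCD_INITIAL_CLUSTER= prefix and the
# surrounding quotes, leaving only the comma-separated member list.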
diff --git a/playbooks/common/openshift-etcd/server_certificates.yml b/playbooks/common/openshift-etcd/server_certificates.yml
new file mode 100644
index 000000000..10e06747b
--- /dev/null
+++ b/playbooks/common/openshift-etcd/server_certificates.yml
@@ -0,0 +1,15 @@
+---
+- name: Create etcd server certificates for etcd hosts
+ hosts: oo_etcd_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: server_certificates
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 516618de2..19e14ab3e 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -1,14 +1,15 @@
---
- name: GlusterFS Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set GlusterFS install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_glusterfs: "In Progress"
- aggregate: false
+ installer_phase_glusterfs:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Open firewall ports for GlusterFS nodes
hosts: glusterfs
@@ -18,6 +19,11 @@
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_is_native | default(True) | bool
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+ when:
+ - openshift_storage_glusterfs_is_native | default(True) | bool
- name: Open firewall ports for GlusterFS registry nodes
hosts: glusterfs_registry
@@ -27,6 +33,11 @@
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_registry_is_native | default(True) | bool
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+ when:
+ - openshift_storage_glusterfs_registry_is_native | default(True) | bool
- name: Configure GlusterFS
hosts: oo_first_master
@@ -37,12 +48,13 @@
when: groups.oo_glusterfs_to_config | default([]) | count > 0
- name: GlusterFS Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set GlusterFS install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_glusterfs: "Complete"
- aggregate: false
+ installer_phase_glusterfs:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index ecbb092bc..d737b836b 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -1,14 +1,24 @@
---
- name: Load Balancer Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set load balancer install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_loadbalancer: "In Progress"
- aggregate: false
+ installer_phase_loadbalancer:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- name: Configure firewall and docker for load balancers
+ hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
+ vars:
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_docker
+ when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
- name: Configure load balancers
hosts: oo_lb_to_config
@@ -25,16 +35,17 @@
+ openshift_loadbalancer_additional_backends | default([]) }}"
openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
- - role: os_firewall
- role: openshift_loadbalancer
+ - role: tuned
- name: Load Balancer Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set load balancer install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_loadbalancer: "Complete"
- aggregate: false
+ installer_phase_loadbalancer:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..facb3a5b9
--- /dev/null
+++ b/playbooks/common/openshift-management/add_container_provider.yml
@@ -0,0 +1,8 @@
+---
+- name: Add Container Provider to Management
+ hosts: oo_first_master
+ tasks:
+ - name: Run the Management Integration Tasks
+ include_role:
+ name: openshift_management
+ tasks_from: add_container_provider
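The new play targets oo_first_master, so it assumes group evaluation has
already run. A hypothetical byo-level wrapper might wire it up like this (the
include paths are an assumption, not part of this patch):

---
- include: ../../common/openshift-cluster/evaluate_groups.yml
- include: ../../common/openshift-management/add_container_provider.yml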
diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml
index 0aaafe440..3f1cdf713 100644
--- a/playbooks/common/openshift-management/config.yml
+++ b/playbooks/common/openshift-management/config.yml
@@ -1,4 +1,16 @@
---
+- name: Management Install Checkpoint Start
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_management:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
- name: Setup CFME
hosts: oo_first_master
pre_tasks:
@@ -13,3 +25,15 @@
name: openshift_management
vars:
template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
+
+- name: Management Install Checkpoint End
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_management:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-management/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml
index 698d93405..9f35cc276 100644
--- a/playbooks/common/openshift-management/uninstall.yml
+++ b/playbooks/common/openshift-management/uninstall.yml
@@ -1,6 +1,6 @@
---
- name: Uninstall CFME
- hosts: masters
+ hosts: masters[0]
tasks:
- name: Run the CFME Uninstall Role Tasks
include_role:
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index ee76e2ed7..32f638d42 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -1,14 +1,15 @@
---
- name: Master Additional Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Master Additional install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_master_additional: "In Progress"
- aggregate: false
+ installer_phase_master_additional:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Additional master configuration
hosts: oo_first_master
@@ -20,16 +21,18 @@
roles:
- role: openshift_master_cluster
when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
+ - role: openshift_project_request_template
+ when: openshift_project_request_template_manage
- role: openshift_examples
when: openshift_install_examples | default(true, true) | bool
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_hosted_templates
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_manageiq
- when: openshift_use_manageiq | default(false) | bool
+ when: openshift_use_manageiq | default(true) | bool
- role: cockpit
when:
- - openshift.common.is_atomic
+ - not openshift.common.is_atomic | bool
- deployment_type == 'openshift-enterprise'
- osm_use_cockpit is undefined or osm_use_cockpit | bool
- openshift.common.deployment_subtype != 'registry'
@@ -37,12 +40,13 @@
when: openshift_use_flannel | default(false) | bool
- name: Master Additional Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Master Additional install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_master_additional: "Complete"
- aggregate: false
+ installer_phase_master_additional:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index bc1fee982..6b0fd6b7c 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,14 +1,15 @@
---
- name: Master Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Master install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_master: "In Progress"
- aggregate: false
+ installer_phase_master:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- include: certificates.yml
@@ -198,6 +199,7 @@
openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
+ - role: tuned
- role: nuage_ca
when: openshift_use_nuage | default(false) | bool
- role: nuage_common
@@ -206,6 +208,18 @@
when: openshift_use_nuage | default(false) | bool
- role: calico_master
when: openshift_use_calico | default(false) | bool
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: master
+ when: openshift_use_kuryr | default(false) | bool
+
+ - name: Setup the node group config maps
+ include_role:
+ name: openshift_node_group
+ when: openshift_master_bootstrap_enabled | default(false) | bool
+ run_once: True
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
@@ -226,12 +240,13 @@
r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Master Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Master install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_master: "Complete"
- aggregate: false
+ installer_phase_master:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
deleted file mode 100644
index d0a9f11dc..000000000
--- a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
+++ /dev/null
@@ -1,2 +0,0 @@
-// empty file so that the master-config can still point to a file that exists
-// this file will be replaced by the template service broker role if enabled
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 4f8b758fd..4e1b3a3be 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -1,22 +1,4 @@
---
-- name: Restart master API
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: restarted
- when: openshift_master_ha | bool
-- name: Wait for master API to come back online
- wait_for:
- host: "{{ openshift.common.hostname }}"
- state: started
- delay: 10
- port: "{{ openshift.master.api_port }}"
- timeout: 600
- when: openshift_master_ha | bool
-- name: Restart master controllers
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: restarted
- # Ignore errrors since it is possible that type != simple for
- # pre-3.1.1 installations.
- ignore_errors: true
- when: openshift_master_ha | bool
+- include_role:
+ name: openshift_master
+ tasks_from: restart.yml
diff --git a/playbooks/common/openshift-master/revert-client-ca.yml b/playbooks/common/openshift-master/revert-client-ca.yml
new file mode 100644
index 000000000..9ae23bf5b
--- /dev/null
+++ b/playbooks/common/openshift-master/revert-client-ca.yml
@@ -0,0 +1,17 @@
+---
+- name: Set servingInfo.clientCA = ca.crt in master config
+ hosts: oo_masters_to_config
+ tasks:
+ - name: Read master config
+ slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_output
+
+  # servingInfo.clientCA may have been set to client-ca-bundle.crt during
+  # CA redeployment; this task reverts that change.
+ - name: Set servingInfo.clientCA = ca.crt in master config
+ modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
+ when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
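Because modify_yaml edits the file in place, a follow-up check can confirm the
revert took effect using the same slurp-and-decode idiom. This verification
play is illustrative only, not part of the patch:

---
- name: Verify servingInfo.clientCA was reverted
  hosts: oo_masters_to_config
  tasks:
  - name: Re-read master config
    slurp:
      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
    register: verify_config
  - name: Assert clientCA is ca.crt
    assert:
      that:
      - (verify_config.content | b64decode | from_yaml).servingInfo.clientCA == 'ca.crt'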
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index f4dc9df8a..4c415ebce 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -22,16 +22,17 @@
- name: restart master api
     service: name={{ openshift.common.service_type }}-master-api state=restarted
notify: verify api server
+ # We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
- name: verify api server
command: >
curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
{{ openshift.master.api_url }}/healthz/ready
args:
# Disables the following warning:
diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
index 560eea785..97acc5d5d 100644
--- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml
+++ b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
@@ -136,9 +136,15 @@
when:
- not front_proxy_kubeconfig.stat.exists
-- name: copy tech preview extension file for service console UI
- copy:
- src: openshift-ansible-catalog-console.js
+- name: Delete temp directory
+ file:
+ name: "{{ certtemp.stdout }}"
+ state: absent
+ changed_when: False
+
+- name: Setup extension file for service console UI
+ template:
+ src: ../templates/openshift-ansible-catalog-console.js
dest: /etc/origin/master/openshift-ansible-catalog-console.js
- name: Update master config
@@ -179,8 +185,13 @@
- yedit_output.changed
- openshift.master.cluster_method == 'native'
+# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
when:
- yedit_output.changed
- openshift.master.cluster_method == 'native'
@@ -190,11 +201,7 @@
# wait_for port doesn't provide health information.
command: >
curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
{{ openshift.master.api_url }}/healthz/ready
args:
# Disables the following warning:
@@ -207,9 +214,3 @@
changed_when: false
when:
- yedit_output.changed
-
-- name: Delete temp directory
- file:
- name: "{{ certtemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..fd02325ba
--- /dev/null
+++ b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js
@@ -0,0 +1 @@
+window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }};
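Rendered, the template emits a single literal line, for example
window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = false; when
template_service_broker_install is false, so the console flag now tracks the
install setting instead of the empty placeholder file deleted above.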
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index 66303d6f7..6ea77e00b 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -1,14 +1,15 @@
---
- name: NFS Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set NFS install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_nfs: "In Progress"
- aggregate: false
+ installer_phase_nfs:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Configure nfs
hosts: oo_nfs_to_config
@@ -17,12 +18,13 @@
- role: openshift_storage_nfs
- name: NFS Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set NFS install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_nfs: "Complete"
- aggregate: false
+ installer_phase_nfs:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml
index fe51ef833..ac757397b 100644
--- a/playbooks/common/openshift-node/additional_config.yml
+++ b/playbooks/common/openshift-node/additional_config.yml
@@ -19,10 +19,14 @@
- group_by:
key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }}
changed_when: False
+ # Create group for kuryr nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }}
+ changed_when: False
- include: etcd_client_config.yml
vars:
- openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv"
+ openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr"
- name: Additional node config
hosts: oo_nodes_use_flannel
@@ -50,3 +54,11 @@
- role: contiv
contiv_role: netplugin
when: openshift_use_contiv | default(false) | bool
+
+- name: Configure Kuryr node
+ hosts: oo_nodes_use_kuryr
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: node
+ when: openshift_use_kuryr | default(false) | bool
diff --git a/playbooks/common/openshift-node/clean_image.yml b/playbooks/common/openshift-node/clean_image.yml
new file mode 100644
index 000000000..38753d0af
--- /dev/null
+++ b/playbooks/common/openshift-node/clean_image.yml
@@ -0,0 +1,10 @@
+---
+- name: Clean up nodes for image build
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ tasks:
+ - name: Remove any ansible facts created during AMI creation
+ file:
+ path: "/etc/ansible/facts.d/{{ item }}"
+ state: absent
+ with_items:
+ - openshift.fact
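Local fact files under /etc/ansible/facts.d surface as ansible_local on the
next fact-gathering pass, so an openshift.fact baked into the AMI would feed
stale, host-specific values back into openshift_facts on first boot. A sketch
of what a leftover file would expose (illustrative only):

---
- hosts: oo_nodes_to_config
  tasks:
  - name: Show any cached openshift local facts
    debug:
      msg: "{{ ansible_local.openshift | default('no cached openshift facts') }}"

Note that fact gathering must be enabled (the default) for ansible_local to be
populated.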
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 700aab48c..28e3c1b1b 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,14 +1,15 @@
---
- name: Node Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Node install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_node: "In Progress"
- aggregate: false
+ installer_phase_node:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- include: certificates.yml
@@ -25,12 +26,13 @@
- include: enable_excluders.yml
- name: Node Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set Node install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_node: "Complete"
- aggregate: false
+ installer_phase_node:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/common/openshift-node/configure_nodes.yml
index c96e4921c..17259422d 100644
--- a/playbooks/common/openshift-node/configure_nodes.yml
+++ b/playbooks/common/openshift-node/configure_nodes.yml
@@ -13,4 +13,5 @@
roles:
- role: os_firewall
- role: openshift_node
+ - role: tuned
- role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
index fc06621ee..30651a1df 100644
--- a/playbooks/common/openshift-node/image_prep.yml
+++ b/playbooks/common/openshift-node/image_prep.yml
@@ -2,13 +2,13 @@
- name: normalize groups
include: ../../byo/openshift-cluster/initialize_groups.yml
-- name: run the std_include
+- name: evaluate the groups
include: ../openshift-cluster/evaluate_groups.yml
-- name: run the std_include
+- name: initialize the facts
include: ../openshift-cluster/initialize_facts.yml
-- name: run the std_include
+- name: initialize the repositories
include: ../openshift-cluster/initialize_openshift_repos.yml
- name: run node config setup
@@ -19,3 +19,6 @@
- name: Re-enable excluders
include: enable_excluders.yml
+
+- name: Remove any undesired artifacts from the build
+ include: clean_image.yml