Diffstat (limited to 'playbooks/common/openshift-cluster')
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 44
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml | 101
l---------  playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/redeploy-certificates/library | 1
l---------  playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml | 38
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml | 300
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/registry.yml | 100
l---------  playbooks/common/openshift-cluster/redeploy-certificates/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/router.yml | 141
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 28
l---------  playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 29
l---------  playbooks/common/openshift-cluster/upgrades/etcd/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 64
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml | 17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 59
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 40
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 36
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 30
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 50
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 46
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 30
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 50
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 46
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 30
l---------  playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml | 20
l---------  playbooks/common/openshift-cluster/upgrades/v3_9/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 142
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 144
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 115
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml | 7
51 files changed, 677 insertions(+), 1231 deletions(-)
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
deleted file mode 100644
index 2eeb81b86..000000000
--- a/playbooks/common/openshift-cluster/config.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- include: ../../openshift-checks/private/install.yml
-
-- include: ../../openshift-etcd/private/config.yml
-
-- include: ../../openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- include: ../../openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- include: ../../openshift-master/private/config.yml
-
-- include: ../../openshift-master/private/additional_config.yml
-
-- include: ../../openshift-node/private/config.yml
-
-- include: ../../openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- include: ../../openshift-hosted/private/config.yml
-
-- include: ../../openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- include: openshift_logging.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- include: ../../openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- include: ../../openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(true) | bool
-
-- include: ../../openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
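The deleted config.yml above chained the component playbooks together with `include:`. The refactor this diff belongs to (visible in the `include:` to `import_playbook` changes later in the diff) moves that orchestration to top-level entry points built on static imports. A minimal sketch of the equivalent wiring; the paths here are illustrative and do not claim to match the repository's actual new layout:

---
# Hedged sketch: statically imported component playbooks.
- import_playbook: openshift-checks/private/install.yml

- import_playbook: openshift-etcd/private/config.yml

- import_playbook: openshift-nfs/private/config.yml
  when: groups.oo_nfs_to_config | default([]) | count > 0

Because `import_playbook` is resolved at parse time, per-play conditionals still work, but a mistyped path fails immediately instead of partway through a run.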
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
deleted file mode 100644
index bc59bd95a..000000000
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Logging Install Checkpoint Start
- hosts: all
- gather_facts: false
- tasks:
- - name: Set Logging install 'In Progress'
- run_once: true
- set_stats:
- data:
- installer_phase_logging:
- status: "In Progress"
- start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-
-- name: OpenShift Aggregated Logging
- hosts: oo_first_master
- roles:
- - openshift_logging
-
-- name: Update Master configs
- hosts: oo_masters:!oo_first_master
- tasks:
- - block:
- - include_role:
- name: openshift_logging
- tasks_from: update_master_config
-
-- name: Logging Install Checkpoint End
- hosts: all
- gather_facts: false
- tasks:
- - name: Set Logging install 'Complete'
- run_once: true
- set_stats:
- data:
- installer_phase_logging:
- status: "Complete"
- end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
deleted file mode 100644
index 4a9fbf7eb..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Check cert expiries
- hosts: "{{ g_check_expiry_hosts }}"
- vars:
- openshift_certificate_expiry_show_all: yes
- roles:
- # Sets 'check_results' per host which contains health status for
- # etcd, master and node certificates. We will use 'check_results'
- # to determine if any certificates were expired prior to running
- # this playbook. Service restarts will be skipped if any
- # certificates were previously expired.
- - role: openshift_certificate_expiry
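The comment above describes the contract later plays depend on: the role leaves a `check_results` fact on every host, and restart plays query it through the repository's `oo_select_keys` and `oo_collect` filter plugins. A minimal sketch of that gate, assuming the result structure used elsewhere in this diff:

- name: Gate a restart on prior certificate expiry (illustrative)
  hosts: localhost
  gather_facts: false
  tasks:
    - debug:
        msg: "No etcd certificate was expired; a restart is safe."
      when: ('expired' not in (hostvars
             | oo_select_keys(groups['etcd'])
             | oo_collect('check_results.check_results.etcd')
             | oo_collect('health')))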
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
deleted file mode 100644
index d738c8207..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Backup and remove generated etcd certificates
- hosts: oo_first_etcd
- any_errors_fatal: true
- tasks:
- - include_role:
- name: etcd
- tasks_from: backup_generated_certificates
- - include_role:
- name: etcd
- tasks_from: remove_generated_certificates
-
-- name: Backup deployed etcd certificates
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- tasks:
- - include_role:
- name: etcd
- tasks_from: backup_server_certificates
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
deleted file mode 100644
index 438f704bc..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
+++ /dev/null
@@ -1,101 +0,0 @@
----
-- name: Check cert expiries
- hosts: oo_etcd_to_config:oo_masters_to_config
- vars:
- openshift_certificate_expiry_show_all: yes
- roles:
- # Sets 'check_results' per host which contains health status for
- # etcd, master and node certificates. We will use 'check_results'
- # to determine if any certificates were expired prior to running
- # this playbook. Service restarts will be skipped if any
- # certificates were previously expired.
- - role: openshift_certificate_expiry
-
-- name: Backup existing etcd CA certificate directories
- hosts: oo_etcd_to_config
- tasks:
- - include_role:
- name: etcd
- tasks_from: backup_ca_certificates
- - include_role:
- name: etcd
- tasks_from: remove_ca_certificates
-
-- include: ../../../openshift-etcd/private/ca.yml
-
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_etcd_mktemp
- changed_when: false
-
-- name: Distribute etcd CA to etcd hosts
- hosts: oo_etcd_to_config
- tasks:
- - include_role:
- name: etcd
- tasks_from: distribute_ca.yml
- vars:
- etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-
-- include: ../../../openshift-etcd/private/restart.yml
- # Do not restart etcd when etcd certificates were previously expired.
- when: ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
-
-- name: Retrieve etcd CA certificate
- hosts: oo_first_etcd
- tasks:
- - include_role:
- name: etcd
- tasks_from: retrieve_ca_certificates
- vars:
- etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
-
-- name: Distribute etcd CA to masters
- hosts: oo_masters_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Deploy etcd CA
- copy:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
- dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
- when: groups.oo_etcd_to_config | default([]) | length > 0
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file:
- name: "{{ g_etcd_mktemp.stdout }}"
- state: absent
- changed_when: false
-
-- include: ../../../openshift-master/private/restart.yml
- # Do not restart masters when master or etcd certificates were previously expired.
- when:
- # masters
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
- # etcd
- - ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins
deleted file mode 120000
index b1213dedb..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/library b/playbooks/common/openshift-cluster/redeploy-certificates/library
deleted file mode 120000
index 9a53f009d..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins
deleted file mode 120000
index aff753026..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
deleted file mode 100644
index 4dbc041b0..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- name: Backup and remove master certificates
- hosts: oo_masters_to_config
- any_errors_fatal: true
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
- pre_tasks:
- - stat:
- path: "{{ openshift.common.config_base }}/generated-configs"
- register: openshift_generated_configs_dir_stat
- - name: Backup generated certificate and config directories
- command: >
- tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz
- {{ openshift.common.config_base }}/generated-configs
- {{ openshift.common.config_base }}/master
- when: openshift_generated_configs_dir_stat.stat.exists
- delegate_to: "{{ openshift_ca_host }}"
- run_once: true
- - name: Remove generated certificate directories
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - "{{ openshift.common.config_base }}/generated-configs"
- - name: Remove generated certificates
- file:
- path: "{{ openshift.common.config_base }}/master/{{ item }}"
- state: absent
- with_items:
- - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}"
- - "etcd.server.crt"
- - "etcd.server.key"
- - "master.server.crt"
- - "master.server.key"
- - "openshift-master.crt"
- - "openshift-master.key"
- - "openshift-master.kubeconfig"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
deleted file mode 100644
index 2ad84b3b9..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Ensure node directory is absent from generated configs
- hosts: oo_first_master
- tasks:
- # The generated configs directory (/etc/origin/generated-configs) is
- # backed up during redeployment of the control plane certificates.
- # We need to ensure that the generated config directory for
- # individual nodes has been deleted before continuing, so verify
- # that it is missing here.
- - name: Ensure node directories and tarballs are absent from generated configs
- shell: >
- rm -rf {{ openshift.common.config_base }}/generated-configs/node-*
- args:
- warn: no
-
-- name: Redeploy node certificates
- hosts: oo_nodes_to_config
- pre_tasks:
- - name: Remove CA certificate
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - "{{ openshift.common.config_base }}/node/ca.crt"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
deleted file mode 100644
index 5a837d80d..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ /dev/null
@@ -1,300 +0,0 @@
----
-- name: Check cert expiries
- hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config
- vars:
- openshift_certificate_expiry_show_all: yes
- roles:
- # Sets 'check_results' per host which contains health status for
- # etcd, master and node certificates. We will use 'check_results'
- # to determine if any certificates were expired prior to running
- # this playbook. Service restarts will be skipped if any
- # certificates were previously expired.
- - role: openshift_certificate_expiry
-
-# Update master config when ca-bundle not referenced. Services will be
-# restarted below after new CA certificate has been distributed.
-- name: Ensure ca-bundle.crt is referenced in master configuration
- hosts: oo_masters_to_config
- tasks:
- - slurp:
- src: "{{ openshift.common.config_base }}/master/master-config.yaml"
- register: g_master_config_output
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: kubeletClientInfo.ca
- yaml_value: ca-bundle.crt
- when: (g_master_config_output.content|b64decode|from_yaml).kubeletClientInfo.ca != 'ca-bundle.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: serviceAccountConfig.masterCA
- yaml_value: ca-bundle.crt
- when: (g_master_config_output.content|b64decode|from_yaml).serviceAccountConfig.masterCA != 'ca-bundle.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: oauthConfig.masterCA
- yaml_value: ca-bundle.crt
- when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: etcdClientInfo.ca
- yaml_value: ca-bundle.crt
- when:
- - groups.oo_etcd_to_config | default([]) | length == 0
- - (g_master_config_output.content|b64decode|from_yaml).etcdClientInfo.ca != 'ca-bundle.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: etcdConfig.peerServingInfo.clientCA
- yaml_value: ca-bundle.crt
- when:
- - groups.oo_etcd_to_config | default([]) | length == 0
- - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.peerServingInfo.clientCA != 'ca-bundle.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: etcdConfig.servingInfo.clientCA
- yaml_value: ca-bundle.crt
- when:
- - groups.oo_etcd_to_config | default([]) | length == 0
- - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
- # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate.
- # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: servingInfo.clientCA
- yaml_value: client-ca-bundle.crt
- when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'client-ca-bundle.crt'
-
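The `modify_yaml` edits above converge several master-config.yaml keys on the CA bundle. For orientation, a trimmed view of the affected keys as they would read after this play (excerpt only, not a complete master config; the etcd keys are edited the same way when no external etcd group is configured):

# master-config.yaml (excerpt, after the edits above)
kubeletClientInfo:
  ca: ca-bundle.crt
serviceAccountConfig:
  masterCA: ca-bundle.crt
oauthConfig:
  masterCA: ca-bundle.crt
servingInfo:
  # rolled to the client bundle here; reverted by
  # playbooks/byo/openshift-cluster/redeploy-certificates.yml
  clientCA: client-ca-bundle.crt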
-- name: Copy current OpenShift CA to legacy directory
- hosts: oo_masters_to_config
- pre_tasks:
- - name: Create legacy-ca directory
- file:
- path: "{{ openshift.common.config_base }}/master/legacy-ca"
- state: directory
- mode: 0700
- owner: root
- group: root
- - command: mktemp -u XXXXXX
- register: g_legacy_ca_mktemp
- changed_when: false
- # Copy CA certificate, key, serial and bundle to legacy-ca with a
-# prefix generated by mktemp, i.e. XXXXXX-ca.crt.
- #
- # The following roles will pick up all CA certificates matching
- # /.*-ca.crt/ in the legacy-ca directory and ensure they are present
- # in the OpenShift CA bundle.
- # - openshift_ca
- # - openshift_master_certificates
- # - openshift_node_certificates
- - name: Copy current OpenShift CA to legacy directory
- copy:
- src: "{{ openshift.common.config_base }}/master/{{ item }}"
- dest: "{{ openshift.common.config_base }}/master/legacy-ca/{{ g_legacy_ca_mktemp.stdout }}-{{ item }}"
- remote_src: true
- # It is possible that redeploying failed and files may be missing.
- # Ignore errors in this case. Files should have been copied to
- # legacy-ca directory in previous run.
- ignore_errors: true
- with_items:
- - "ca.crt"
- - "ca.key"
- - "ca.serial.txt"
- - "ca-bundle.crt"
-
-- name: Create temporary directory for creating new CA certificate
- hosts: oo_first_master
- tasks:
- - name: Create temporary directory for creating new CA certificate
- command: >
- mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_new_openshift_ca_mktemp
- changed_when: false
-
-- name: Create OpenShift CA
- hosts: oo_first_master
- vars:
- # Set openshift_ca_config_dir to a temporary directory where CA
- # will be created. We'll replace the existing CA with the CA
- # created in the temporary directory.
- openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}"
- roles:
- - role: openshift_master_facts
- - role: openshift_named_certificates
- - role: openshift_ca
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
-
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_master_mktemp
- changed_when: false
-
-- name: Retrieve OpenShift CA
- hosts: oo_first_master
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Retrieve CA certificate, key, bundle and serial
- fetch:
- src: "{{ hostvars[openshift_ca_host].g_new_openshift_ca_mktemp.stdout }}/{{ item }}"
- dest: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items:
- - ca.crt
- - ca.key
- - ca-bundle.crt
- - ca.serial.txt
- - client-ca-bundle.crt
- delegate_to: "{{ openshift_ca_host }}"
- run_once: true
- changed_when: false
-
-- name: Distribute OpenShift CA to masters
- hosts: oo_masters_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Deploy CA certificate, key, bundle and serial
- copy:
- src: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/{{ item }}"
- dest: "{{ openshift.common.config_base }}/master/"
- with_items:
- - ca.crt
- - ca.key
- - ca-bundle.crt
- - ca.serial.txt
- - client-ca-bundle.crt
- - name: Update master client kubeconfig CA data
- kubeclient_ca:
- client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig"
- ca_path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- - name: Update admin client kubeconfig CA data
- kubeclient_ca:
- client_path: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
- ca_path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- - name: Lookup default group for ansible_ssh_user
- command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}"
- changed_when: false
- register: _ansible_ssh_user_gid
- - set_fact:
- client_users: "{{ [ansible_ssh_user, 'root'] | unique }}"
- - name: Create the client config dir(s)
- file:
- path: "~{{ item }}/.kube"
- state: directory
- mode: 0700
- owner: "{{ item }}"
- group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
- with_items: "{{ client_users }}"
- - name: Copy the admin client config(s)
- copy:
- src: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
- dest: "~{{ item }}/.kube/config"
- remote_src: yes
- with_items: "{{ client_users }}"
- - name: Update the permissions on the admin client config(s)
- file:
- path: "~{{ item }}/.kube/config"
- state: file
- mode: 0700
- owner: "{{ item }}"
- group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
- with_items: "{{ client_users }}"
-
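`kubeclient_ca` is one of this repository's library modules; it swaps the CA material embedded in an existing kubeconfig so previously issued client credentials keep validating against the new CA. Conceptually it base64-encodes the bundle and replaces each cluster entry's `certificate-authority-data`; a sketch of the touched fragment (values abbreviated and illustrative):

# admin.kubeconfig (excerpt; data abbreviated)
clusters:
- cluster:
    # replaced with the new ca-bundle.crt, base64-encoded
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0t...
    server: https://master.example.com:8443
  name: master-example-com:8443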
-- include: ../../../openshift-master/private/restart.yml
- # Do not restart masters when master or etcd certificates were previously expired.
- when:
- # masters
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
- # etcd
- - ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
-
-- name: Distribute OpenShift CA certificate to nodes
- hosts: oo_nodes_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - copy:
- src: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/ca-bundle.crt"
- dest: "{{ openshift.common.config_base }}/node/ca.crt"
- - name: Copy OpenShift CA to system CA trust
- copy:
- src: "{{ item.cert }}"
- dest: "/etc/pki/ca-trust/source/anchors/{{ item.id }}-{{ item.cert | basename }}"
- remote_src: yes
- with_items:
- - id: openshift
- cert: "{{ openshift.common.config_base }}/node/ca.crt"
- notify:
- - update ca trust
- - name: Update node client kubeconfig CA data
- kubeclient_ca:
- client_path: "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.kubeconfig"
- ca_path: "{{ openshift.common.config_base }}/node/ca.crt"
- handlers:
- # Normally this handler would restart docker after updating ca
- # trust. We'll do that when we restart nodes to avoid restarting
- # docker on all nodes in parallel.
- - name: update ca trust
- command: update-ca-trust
-
-- name: Delete temporary directory on CA host
- hosts: oo_first_master
- tasks:
- - file:
- path: "{{ g_new_openshift_ca_mktemp.stdout }}"
- state: absent
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file:
- name: "{{ g_master_mktemp.stdout }}"
- state: absent
- changed_when: false
-
-- include: ../../../openshift-node/private/restart.yml
- # Do not restart nodes when node, master or etcd certificates were previously expired.
- when:
- # nodes
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
- # masters
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- - ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
- # etcd
- - ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
deleted file mode 100644
index 7e9363c5f..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ /dev/null
@@ -1,100 +0,0 @@
----
-- name: Update registry certificates
- hosts: oo_first_master
- vars:
- roles:
- - lib_openshift
- tasks:
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: false
-
- - name: Copy admin client config(s)
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: false
-
- - name: Determine if docker-registry exists
- command: >
- {{ openshift.common.client_binary }} get dc/docker-registry -o json
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n default
- register: l_docker_registry_dc
- failed_when: false
- changed_when: false
-
- - set_fact:
- docker_registry_env_vars: "{{ ((l_docker_registry_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env']
- | oo_collect('name'))
- | default([]) }}"
- docker_registry_secrets: "{{ ((l_docker_registry_dc.stdout | from_json)['spec']['template']['spec']['volumes']
- | oo_collect('secret')
- | oo_collect('secretName'))
- | default([]) }}"
- changed_when: false
- when: l_docker_registry_dc.rc == 0
-
- # Replace dc/docker-registry environment variable certificate data if set.
- - name: Update docker-registry environment variables
- shell: >
- {{ openshift.common.client_binary }} env dc/docker-registry
- OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
- OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-registry.crt)"
- OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-registry.key)"
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n default
- when: l_docker_registry_dc.rc == 0 and 'OPENSHIFT_CA_DATA' in docker_registry_env_vars and 'OPENSHIFT_CERT_DATA' in docker_registry_env_vars and 'OPENSHIFT_KEY_DATA' in docker_registry_env_vars
-
- # Replace dc/docker-registry certificate secret contents if set.
- - block:
- - name: Retrieve registry service IP
- oc_service:
- namespace: default
- name: docker-registry
- state: list
- register: docker_registry_service_ip
- changed_when: false
-
- - set_fact:
- docker_registry_route_hostname: "{{ 'docker-registry-default.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true)) }}"
- changed_when: false
-
- - name: Generate registry certificate
- command: >
- {{ openshift.common.client_binary }} adm ca create-server-cert
- --signer-cert={{ openshift.common.config_base }}/master/ca.crt
- --signer-key={{ openshift.common.config_base }}/master/ca.key
- --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
- --config={{ mktemp.stdout }}/admin.kubeconfig
- --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
- --cert={{ openshift.common.config_base }}/master/registry.crt
- --key={{ openshift.common.config_base }}/master/registry.key
- --expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }}
-
- - name: Update registry certificates secret
- oc_secret:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- name: registry-certificates
- namespace: default
- state: present
- files:
- - name: registry.crt
- path: "{{ openshift.common.config_base }}/master/registry.crt"
- - name: registry.key
- path: "{{ openshift.common.config_base }}/master/registry.key"
- run_once: true
- when: l_docker_registry_dc.rc == 0 and 'registry-certificates' in docker_registry_secrets and 'REGISTRY_HTTP_TLS_CERTIFICATE' in docker_registry_env_vars and 'REGISTRY_HTTP_TLS_KEY' in docker_registry_env_vars
-
- - name: Redeploy docker registry
- command: >
- {{ openshift.common.client_binary }} deploy dc/docker-registry
- --latest
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n default
-
- - name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
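After regenerating the registry certificate, it can be worth confirming that the SAN list covers the service IP and route hostname the play computed. A hedged verification sketch, not part of the original playbook:

- name: Inspect regenerated registry certificate (illustrative check)
  command: >
    openssl x509 -noout -text
    -in {{ openshift.common.config_base }}/master/registry.crt
  register: l_registry_cert_text
  changed_when: false

- name: Assert the route hostname is present in the SANs
  assert:
    that:
      - docker_registry_route_hostname in l_registry_cert_text.stdout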
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/roles b/playbooks/common/openshift-cluster/redeploy-certificates/roles
deleted file mode 120000
index 4bdbcbad3..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
deleted file mode 100644
index 2116c745c..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ /dev/null
@@ -1,141 +0,0 @@
----
-- name: Update router certificates
- hosts: oo_first_master
- vars:
- roles:
- - lib_openshift
- tasks:
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: router_cert_redeploy_tempdir
- changed_when: false
-
- - name: Copy admin client config(s)
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- changed_when: false
-
- - name: Determine if router exists
- command: >
- {{ openshift.common.client_binary }} get dc/router -o json
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
- register: l_router_dc
- failed_when: false
- changed_when: false
-
- - name: Determine if router service exists
- command: >
- {{ openshift.common.client_binary }} get svc/router -o json
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
- register: l_router_svc
- failed_when: false
- changed_when: false
-
- - name: Collect router environment variables and secrets
- set_fact:
- router_env_vars: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env']
- | oo_collect('name'))
- | default([]) }}"
- router_secrets: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['volumes']
- | oo_collect('secret')
- | oo_collect('secretName'))
- | default([]) }}"
- changed_when: false
- when: l_router_dc.rc == 0
-
- - name: Collect router service annotations
- set_fact:
- router_service_annotations: "{{ (l_router_svc.stdout | from_json)['metadata']['annotations'] if 'annotations' in (l_router_svc.stdout | from_json)['metadata'] else [] }}"
- when: l_router_svc.rc == 0
-
- - name: Update router environment variables
- shell: >
- {{ openshift.common.client_binary }} env dc/router
- OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
- OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)"
- OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)"
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
- when:
- - l_router_dc.rc == 0
- - ('OPENSHIFT_CA_DATA' in router_env_vars)
- - ('OPENSHIFT_CERT_DATA' in router_env_vars)
- - ('OPENSHIFT_KEY_DATA' in router_env_vars)
-
- # When the router service contains service signer annotations we
- # will delete the existing certificate secret and allow OpenShift to
- # replace the secret.
- - block:
- - name: Delete existing router certificate secret
- oc_secret:
- kubeconfig: "{{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig"
- name: router-certs
- namespace: default
- state: absent
- run_once: true
-
- - name: Remove router service annotations
- command: >
- {{ openshift.common.client_binary }} annotate service/router
- service.alpha.openshift.io/serving-cert-secret-name-
- service.alpha.openshift.io/serving-cert-signed-by-
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
-
- - name: Add serving-cert-secret annotation to router service
- command: >
- {{ openshift.common.client_binary }} annotate service/router
- service.alpha.openshift.io/serving-cert-secret-name=router-certs
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
- when:
- - l_router_dc.rc == 0
- - l_router_svc.rc == 0
- - ('router-certs' in router_secrets)
- - openshift_hosted_router_certificate is undefined
- - ('service.alpha.openshift.io/serving-cert-secret-name') in router_service_annotations
- - ('service.alpha.openshift.io/serving-cert-signed-by') in router_service_annotations
-
- # When there are no annotations on the router service we will allow
- # the openshift_hosted role to either create a new wildcard
- # certificate (since we deleted the original) or reapply a custom
- # openshift_hosted_router_certificate.
- - file:
- path: "{{ item }}"
- state: absent
- with_items:
- - /etc/origin/master/openshift-router.crt
- - /etc/origin/master/openshift-router.key
- when:
- - l_router_dc.rc == 0
- - l_router_svc.rc == 0
- - ('router-certs' in router_secrets)
- - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations
- - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations
-
- - include_role:
- name: openshift_hosted
- tasks_from: main
- vars:
- openshift_hosted_manage_registry: false
- when:
- - l_router_dc.rc == 0
- - l_router_svc.rc == 0
- - ('router-certs' in router_secrets)
- - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations
- - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations
-
- - name: Redeploy router
- command: >
- {{ openshift.common.client_binary }} deploy dc/router
- --latest
- --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
- -n default
-
- - name: Delete temp directory
- file:
- name: "{{ router_cert_redeploy_tempdir.stdout }}"
- state: absent
- changed_when: False
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
index 800621857..33ed6a283 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
index a66301c0d..ab3171c9a 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -5,7 +5,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
r_openshift_excluder_verify_upgrade: true
r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
r_openshift_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 6d4ddf011..5c6def484 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,11 +1,11 @@
---
-- include: ../../../../init/evaluate_groups.yml
+- import_playbook: ../../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
@@ -19,7 +19,7 @@
msg: Cannot upgrade Docker on Atomic operating systems.
when: openshift.common.is_atomic | bool
- - include: upgrade_check.yml
+ - include_tasks: upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -51,7 +51,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
@@ -59,7 +59,7 @@
retries: 60
delay: 60
- - include: tasks/upgrade.yml
+ - include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 83f16ac0d..dbc4f39c7 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -6,18 +6,14 @@
retries: 3
delay: 30
-- name: Update docker facts
- openshift_facts:
- role: docker
-
- name: Restart containerized services
service: name={{ item }} state=started
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 808cc562c..4856a4b51 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,9 +4,9 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
- etcd_container
- openvswitch
failed_when: false
@@ -41,6 +41,8 @@
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
+ register: result
+ until: result | success
-- include: restart.yml
+- include_tasks: restart.yml
when: not skip_docker_restart | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
deleted file mode 100644
index 531175c85..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: Backup etcd
- hosts: oo_etcd_hosts_to_backup
- roles:
- - role: openshift_etcd_facts
- post_tasks:
- - include_role:
- name: etcd
- tasks_from: backup
- vars:
- r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
-
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) | list }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when: etcd_backup_failed | length > 0
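The gate above relies on `oo_collect` with a filter mapping: only hosts whose facts match every key/value pair contribute to the collected list. A small illustration with invented host names:

# Given:
#   hostvars['etcd1'].r_etcd_common_backup_complete == true
#   hostvars['etcd2'] never set the fact (its backup task failed)
# The two set_fact expressions evaluate to:
#   etcd_backup_completed -> ['etcd1']
#   etcd_backup_failed    -> ['etcd2']
# and the fail task aborts the upgrade, naming etcd2.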
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
deleted file mode 100644
index 5b8ba3bb2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to
-# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a
-# tedious task for RHEL and CentOS, it's simply not possible in Fedora unless
-# you've mirrored packages on your own, because only the GA and latest versions are
-# available in the repos. So for Fedora we'll simply skip this, sorry.
-
-- name: Backup etcd before upgrading anything
- include: backup.yml
- vars:
- etcd_backup_tag: "pre-upgrade-"
- when: openshift_etcd_backup | default(true) | bool
-
-- name: Drop etcdctl profiles
- hosts: oo_etcd_hosts_to_upgrade
- tasks:
- - include_role:
- name: etcd
- tasks_from: drop_etcdctl
-
-- name: Perform etcd upgrade
- include: ./upgrade.yml
- when: openshift_etcd_upgrade | default(true) | bool
-
-- name: Backup etcd
- include: backup.yml
- vars:
- etcd_backup_tag: "post-3.0-"
- when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
deleted file mode 100644
index c5ff4133c..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Determine etcd version
- hosts: oo_etcd_hosts_to_upgrade
- tasks:
- - include_role:
- name: etcd
- tasks_from: version_detect.yml
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '2.1'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '2.2'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '2.2.5'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '2.3'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '2.3.7'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '3.0'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '3.0.15'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '3.1'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '3.1.3'
-
-- include: upgrade_rpm_members.yml
- vars:
- etcd_upgrade_version: '3.2'
-
-- include: upgrade_image_members.yml
- vars:
- etcd_upgrade_version: '3.2.7'
-
-- name: Upgrade Fedora to latest
- hosts: oo_etcd_hosts_to_upgrade
- serial: 1
- tasks:
- - include_role:
- name: etcd
- tasks_from: upgrade_image
- vars:
- etcd_peer: "{{ openshift.common.hostname }}"
- when:
- - ansible_distribution == 'Fedora'
- - not openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
deleted file mode 100644
index 6fca42bd0..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# INPUT etcd_upgrade_version
-# INPUT etcd_container_version
-# INPUT openshift.common.is_containerized
-- name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
- hosts: oo_etcd_hosts_to_upgrade
- serial: 1
- tasks:
- - include_role:
- name: etcd
- tasks_from: upgrade_image
- vars:
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- etcd_peer: "{{ openshift.common.hostname }}"
- when:
- - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
- - openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
deleted file mode 100644
index 51e8786b3..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# INPUT etcd_upgrade_version
-# INPUT etcd_rpm_version
-# INPUT openshift.common.is_containerized
-- name: Upgrade to {{ etcd_upgrade_version }}
- hosts: oo_etcd_hosts_to_upgrade
- serial: 1
- tasks:
- - include_role:
- name: etcd
- tasks_from: upgrade_rpm
- vars:
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- etcd_peer: "{{ openshift.common.hostname }}"
- when:
- - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
- - ansible_distribution == 'RedHat'
- - not openshift.common.is_containerized | bool
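Every rung of the etcd version ladder is guarded by `version_compare`, so hosts already at or beyond a step are skipped and the sequence is safe to re-run. A minimal illustration of the guard's semantics:

- name: version_compare guard (illustrative)
  hosts: localhost
  gather_facts: false
  tasks:
    - debug:
        msg: "A 2.3.x host still needs the 3.0 step"
      when: "'2.3.7' | version_compare('3.0', '<')"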
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 9981d905b..5454a6680 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,11 +1,11 @@
---
-- include: ../../../init/evaluate_groups.yml
+- import_playbook: ../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../../../init/facts.yml
+- import_playbook: ../../../init/facts.yml
- name: Ensure firewall is not switched during upgrade
hosts: oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index c458184c9..344ddea3c 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -114,7 +114,6 @@
roles:
- role: openshift_excluder
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 6d8503879..18a08eb99 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
@@ -1,7 +1,7 @@
---
# Only check if docker upgrade is required if docker_upgrade is not
# already set to False.
-- include: ../../docker/upgrade_check.yml
+- include_tasks: ../../docker/upgrade_check.yml
when:
- docker_upgrade is not defined or (docker_upgrade | bool)
- not (openshift.common.is_atomic | bool)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 6a5bc24f7..bef95546d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -13,21 +13,21 @@
block:
- set_fact:
master_services:
- - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift_service_type }}-master"
# In case of the non-HA to HA upgrade.
- - name: Check if the {{ openshift.common.service_type }}-master-api.service exists
+ - name: Check if the {{ openshift_service_type }}-master-api.service exists
command: >
- systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend
+ systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
register: master_api_service_status
- set_fact:
master_services:
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
when:
- master_api_service_status.stdout_lines | length > 0
- - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+ - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
- name: Ensure Master is running
service:
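The detection above distinguishes a single combined master service from split api/controllers units by querying systemd. A sketch of the list-units output that flips `master_services` to the HA pair (output abbreviated and illustrative; the unit prefix comes from `openshift_service_type`, e.g. `origin` or `atomic-openshift`):

# $ systemctl list-units origin-master-api.service --no-legend
# origin-master-api.service loaded active running OpenShift Master API
#
# stdout_lines[0] contains 'origin-master-api.service', so the play
# switches master_services to the -master-api / -master-controllers pair.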
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 446f315d6..96f970506 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -6,7 +6,7 @@
- name: Update oreg_auth docker login credentials if necessary
include_role:
- name: docker
+ name: container_runtime
tasks_from: registry_auth.yml
when: oreg_auth_user is defined
@@ -21,7 +21,7 @@
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift.common.service_type }}"
+ name: "{{ openshift_service_type }}"
ignore_excluders: true
register: repoquery_out
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index d7a52707c..37fc8a0f6 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -16,8 +16,8 @@
local_facts:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-- name: Upgrade and backup etcd
- include: ./etcd/main.yml
+- name: Backup and upgrade etcd
+ import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
# Create service signer cert when missing. Service signer certificate
# is added to master config in the master_config_upgrade hook.
@@ -30,7 +30,7 @@
register: service_signer_cert_stat
changed_when: false
-- include: create_service_signer_cert.yml
+- import_playbook: create_service_signer_cert.yml
# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
@@ -71,7 +71,7 @@
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: "{{ openshift_master_upgrade_pre_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- include_role:
@@ -82,20 +82,20 @@
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: "{{ openshift_master_upgrade_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
- - include: ../../../openshift-master/private/tasks/restart_hosts.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
- - include: ../../../openshift-master/private/tasks/restart_services.yml
+ - include_tasks: ../../../openshift-master/private/tasks/restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
# Run the post-upgrade hook if defined:
- debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- - include: "{{ openshift_master_upgrade_post_hook }}"
+ - include_tasks: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- name: Post master upgrade - Upgrade clusterpolicies storage
@@ -143,10 +143,6 @@
roles:
- { role: openshift_cli }
vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
- # restart.
- skip_docker_role: True
__master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
@@ -279,7 +275,7 @@
roles:
- openshift_facts
tasks:
- - include: docker/tasks/upgrade.yml
+ - include_tasks: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
@@ -309,7 +305,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
until: not l_upgrade_control_plane_drain_result | failed
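
The include → import_playbook / include_tasks churn in this file (and in every version-specific playbook below) follows Ansible 2.4's split of the deprecated bare include into static imports, resolved at parse time, and dynamic includes, resolved at run time. The distinction matters for the master upgrade hooks: their file names come from user-supplied variables, so they can only work as dynamic includes. A minimal sketch, with a hypothetical hook path:

    ---
    # Static: the target must be a fixed path, resolvable when the playbook
    # is parsed.
    - import_playbook: ../init.yml

    # Dynamic: the templated path is rendered only when the task runs, so a
    # hook variable set in inventory (hypothetical value shown) still works.
    #   openshift_master_upgrade_pre_hook=/etc/ansible/hooks/pre_master.yml
    - hosts: oo_masters_to_config
      tasks:
      - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
        when: openshift_master_upgrade_pre_hook is defined
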
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 75ffd3fe9..f7a85545b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,7 +26,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
@@ -45,7 +45,6 @@
name: openshift_excluder
vars:
r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
new file mode 100644
index 000000000..47410dff3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -0,0 +1,59 @@
+---
+- name: create new scale group
+ hosts: localhost
+ tasks:
+ - name: build upgrade scale groups
+ include_role:
+ name: openshift_aws
+ tasks_from: upgrade_node_group.yml
+
+ - fail:
+      msg: "Ensure that new scale groups were provisioned before proceeding with the upgrade."
+ when:
+ - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+
+- name: initialize upgrade bits
+ import_playbook: init.yml
+
+- name: Drain and upgrade nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+
+ pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: ../roles/lib_openshift
+
+ # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+ # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+ - name: Mark node unschedulable
+ oc_adm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
+
+ - name: Drain Node for Kubelet upgrade
+ command: >
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not l_upgrade_nodes_drain_result | failed
+ retries: 60
+ delay: 60
+
+# Alright, let's clean up!
+- name: clean up the old scale group
+ hosts: localhost
+ tasks:
+ - name: clean up scale group
+ include_role:
+ name: openshift_aws
+ tasks_from: remove_scale_group.yml
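
The new scale-group flow is AWS-specific: provision a replacement scale group through the openshift_aws role, fail fast if oo_sg_new_nodes came back empty, drain and upgrade the outgoing oo_sg_current_nodes in batches, then remove the old group. Per the in-file comment, the batch size has to arrive as an extra var, so an invocation would look something like `ansible-playbook playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml -e openshift_upgrade_nodes_serial="20%"` (command shape assumed; the variable names are taken from the playbook itself).
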
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 6cb6a665f..9f9399ff9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,7 +17,7 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -43,27 +43,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -73,35 +73,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -113,12 +107,12 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
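
Each version directory wires its own master-config edits into the shared control-plane playbook through the master_config_hook variable; per the comment near the top of upgrade_control_plane.yml, the service signer certificate is added to the master config inside that hook. A sketch of the hand-off, with the consumption side assumed to be a dynamic include in the shared playbook:

    ---
    - import_playbook: ../upgrade_control_plane.yml
      vars:
        master_config_hook: "v3_6/master_config_upgrade.yml"

    # ...and inside the shared playbook, presumably something like:
    # - include_tasks: "{{ master_config_hook }}"
    #   when: master_config_hook is defined
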
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 8f48bedcc..7374160d6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,7 +25,7 @@
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -51,23 +51,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,35 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -117,10 +111,10 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_6/master_config_upgrade.yml"
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index f25cfe0d0..de9bf098e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -66,12 +66,6 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
tags:
@@ -80,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -110,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 2b99568c7..0c1a99272 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,11 +17,11 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -47,27 +47,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,35 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -117,9 +111,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -128,15 +122,15 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
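
The stop-all-then-start-all controller cycle appended after the control-plane import is deliberately two-phase: restarting the {{ openshift_service_type }}-master-controllers units one at a time could leave old- and new-style leader-election locks held at the same time, so every unit is stopped before any is started again (rationale inferred from the play name; the playbook itself only encodes the ordering).
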
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index d3d2046e6..9dcad352c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,11 +25,11 @@
openshift_upgrade_min: '3.6'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -55,23 +55,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -81,35 +81,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -121,9 +115,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -132,13 +126,13 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index c0546bd2d..27a7f67ea 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: '3.6'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -66,12 +66,6 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
tags:
@@ -80,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -110,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index b602cdd0e..ead2efbd0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -2,7 +2,7 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -17,11 +17,11 @@
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -47,27 +47,27 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -77,35 +77,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -117,9 +111,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -128,15 +122,15 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index da81e6dea..ae37b1359 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -11,7 +11,7 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -25,11 +25,11 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
-- include: ../pre/verify_etcd3_backend.yml
+- import_playbook: ../pre/verify_etcd3_backend.yml
tags:
- pre_upgrade
@@ -55,23 +55,23 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
-- include: ../disable_master_excluders.yml
+- import_playbook: ../disable_master_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -81,35 +81,29 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../openshift-master/private/validate_restart.yml
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_masters_to_config
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: validator.yml
+- import_playbook: validator.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -121,9 +115,9 @@
- name: Cleanup unused Docker images
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_control_plane.yml
+- import_playbook: ../upgrade_control_plane.yml
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
@@ -132,13 +126,13 @@
hosts: oo_masters_to_config
gather_facts: no
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
-- include: ../post_control_plane.yml
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index abd56e762..dd716b241 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -4,7 +4,7 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../init.yml
+- import_playbook: ../init.yml
tags:
- pre_upgrade
@@ -18,7 +18,7 @@
openshift_upgrade_min: '3.7'
# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
+- import_playbook: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
@@ -44,19 +44,19 @@
- openshift_http_proxy is defined or openshift_https_proxy is defined
- openshift_generate_no_proxy_hosts | default(True) | bool
-- include: ../pre/verify_inventory_vars.yml
+- import_playbook: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
-- include: ../pre/verify_health_checks.yml
+- import_playbook: ../pre/verify_health_checks.yml
tags:
- pre_upgrade
-- include: ../disable_node_excluders.yml
+- import_playbook: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../../../init/version.yml
+- import_playbook: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -66,12 +66,6 @@
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
tags:
@@ -80,25 +74,25 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when: openshift.common.version != openshift_version
-- include: ../pre/verify_control_plane_running.yml
+- import_playbook: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
- name: Verify upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_upgrade_targets.yml
+ - include_tasks: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/gate_checks.yml
+- import_playbook: ../pre/gate_checks.yml
tags:
- pre_upgrade
@@ -110,6 +104,6 @@
- name: Cleanup unused Docker images
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../cleanup_unused_images.yml
+ - include_tasks: ../cleanup_unused_images.yml
-- include: ../upgrade_nodes.yml
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins
new file mode 120000
index 000000000..7de3c1dd7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
new file mode 100644
index 000000000..1d4d1919c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -0,0 +1,20 @@
+---
+- modify_yaml:
+    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.election.lockName'
+ yaml_value: 'openshift-master-controllers'
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
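
Taken together, the four modify_yaml edits leave roughly this fragment in master-config.yaml (sketch; unrelated keys omitted, and openshift.common.config_base typically resolves to /etc/origin):

    controllerConfig:
      election:
        lockName: openshift-master-controllers
      serviceServingCert:
        signer:
          certFile: service-signer.crt
          keyFile: service-signer.key
    servingInfo:
      clientCA: ca.crt
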
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/roles b/playbooks/common/openshift-cluster/upgrades/v3_9/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
new file mode 100644
index 000000000..eb688f189
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -0,0 +1,142 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+# Pre-upgrade
+
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- import_playbook: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../../../../init/version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include_tasks: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - import_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: validator.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed; nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include_tasks: ../cleanup_unused_images.yml
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../upgrade_nodes.yml
+
+- import_playbook: ../post_control_plane.yml
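
The Set openshift_no_proxy_internal_hostnames play above leans on the repository's oo_select_keys and oo_collect filter plugins: take the union of the node, master, and etcd groups, collect each host's openshift.common.hostname fact, and join the results into one comma-separated string. For a hypothetical inventory with one master (m1), one node (n1), and one etcd host (e1), the fact would come out roughly as:

    # Hypothetical three-host inventory:
    openshift_no_proxy_internal_hostnames: "m1.example.com,n1.example.com,e1.example.com"
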
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
new file mode 100644
index 000000000..983bb4a63
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -0,0 +1,144 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- import_playbook: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../../../../init/version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include_tasks: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: validator.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed; nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include_tasks: ../cleanup_unused_images.yml
+
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- import_playbook: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
new file mode 100644
index 000000000..d95cfa4e1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -0,0 +1,115 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- import_playbook: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../../../../init/version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- import_playbook: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include_tasks: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- import_playbook: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed; nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include_tasks: ../cleanup_unused_images.yml
+
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
new file mode 100644
index 000000000..4bd2d87b1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.9 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - debug: msg="noop"