Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/uninstall.yml                                               |  10
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml      |   7
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_8/README.md                     |   2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/README.md                     |  20
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml                   |   5
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml     |  14
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml             |   7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml        |   2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml         |  59
l---------  playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins             |   1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml  |  20
l---------  playbooks/common/openshift-cluster/upgrades/v3_9/roles                      |   1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml                | 142
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml  | 144
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml          | 115
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml              |   7
-rw-r--r--  playbooks/init/facts.yml                                                    |   4
-rw-r--r--  playbooks/openshift-node/private/network_manager.yml                        |   2
18 files changed, 561 insertions(+), 1 deletion(-)
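
The recurring change to the existing playbooks in this commit is a retry loop on package tasks, so a transient repository failure no longer aborts the run. A minimal sketch of the pattern the hunks below apply (the task name and package are illustrative, not from this commit; when `until` is set without an explicit `retries`, Ansible defaults to 3 attempts):

```
- name: Install a package, retrying transient failures
  package:
    name: origin-clients   # illustrative package, not from this commit
    state: present
  register: result
  until: result | success
```

The `| success` filter test matches the Ansible 2.4-era style used throughout this diff.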
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 5ed55a817..0c2a2c7e8 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -126,10 +126,14 @@
- origin-sdn-ovs
- tuned-profiles-openshift-node
- tuned-profiles-origin-node
+ register: result
+ until: result | success
- name: Remove flannel package
package: name=flannel state=absent
when: openshift_use_flannel | default(false) | bool
+ register: result
+ until: result | success
when: not is_atomic | bool
- shell: systemctl reset-failed
@@ -382,6 +386,8 @@
- origin-master
- pacemaker
- pcs
+ register: result
+ until: result | success
- shell: systemctl reset-failed
changed_when: False
@@ -497,6 +503,8 @@
with_items:
- etcd
- etcd3
+ register: result
+ until: result | success
- shell: systemctl reset-failed
changed_when: False
@@ -554,6 +562,8 @@
when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- haproxy
+ register: result
+ until: result | success
- shell: systemctl reset-failed
changed_when: False
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
new file mode 100644
index 000000000..14b0f85d4
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
@@ -0,0 +1,7 @@
+---
+#
+# Node Scale Group Upgrade Playbook
+#
+# Upgrades scale group nodes only.
+#
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md
index d9be6ae3b..815e49c28 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md
@@ -1,4 +1,4 @@
-# v3.6 Major and Minor Upgrade Playbook
+# v3.8 Major and Minor Upgrade Playbook
## Overview
This playbook currently performs the following steps.
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md
new file mode 100644
index 000000000..0ab3d3a52
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md
@@ -0,0 +1,20 @@
+# v3.9 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modify the necessary subset of the configuration
+ * Apply the latest cluster policies
+ * Update the default router if one exists
+ * Update the default registry if one exists
+ * Update image streams and quickstarts
+
+## Usage
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
+```
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
new file mode 100644
index 000000000..e8f9d94e2
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -0,0 +1,5 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
new file mode 100644
index 000000000..acb4195e3
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -0,0 +1,14 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
new file mode 100644
index 000000000..df19097e1
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -0,0 +1,7 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
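
Together with `upgrade_control_plane.yml`, this wrapper enables the phased flow the header comments describe: upgrade the control plane first, then the nodes. A hedged invocation sketch, with an illustrative inventory path mirroring the README:

```
ansible-playbook -i ~/ansible-inventory \
  openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
ansible-playbook -i ~/ansible-inventory \
  openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
```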
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 808cc562c..83be290e6 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -41,6 +41,8 @@
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
+ register: result
+ until: result | success
- include: restart.yml
when: not skip_docker_restart | default(False) | bool
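
With `docker_version` set to, say, `1.12.6` (purely illustrative), the templated package name above expands to a pinned upgrade, now retried until it succeeds:

```
- name: Upgrade Docker
  package: name=docker-1.12.6 state=present   # expansion sketch; version illustrative
  register: result
  until: result | success
```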
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
new file mode 100644
index 000000000..d9ce3a7e3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -0,0 +1,59 @@
+---
+- name: create new scale group
+ hosts: localhost
+ tasks:
+ - name: build upgrade scale groups
+ include_role:
+ name: openshift_aws
+ tasks_from: upgrade_node_group.yml
+
+ - fail:
+ msg: "Ensure that new scale groups were provisioned before proceeding to update."
+ when:
+ - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+
+- name: initialize upgrade bits
+ include: init.yml
+
+- name: Drain and upgrade nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+
+ pre_tasks:
+ - name: Load lib_openshift modules
+ include_role:
+ name: ../roles/lib_openshift
+
+ # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+ # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+ - name: Mark node unschedulable
+ oc_adm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
+
+ - name: Drain Node for Kubelet upgrade
+ command: >
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not l_upgrade_nodes_drain_result | failed
+ retries: 60
+ delay: 60
+
+# Alright, let's clean up!
+- name: clean up the old scale group
+ hosts: localhost
+ tasks:
+ - name: clean up scale group
+ include_role:
+ name: openshift_aws
+ tasks_from: remove_scale_group.yml
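
As the in-file comment notes, the batching variable must be passed with `-e` because it is not a per-host inventory var and is evaluated early. A hedged run through the v3_7 BYO wrapper added earlier in this commit (inventory path illustrative):

```
ansible-playbook -i ~/ansible-inventory \
  openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml \
  -e openshift_upgrade_nodes_serial="20%" \
  -e openshift_upgrade_nodes_max_fail_percentage=10
```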
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins
new file mode 120000
index 000000000..7de3c1dd7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
new file mode 100644
index 000000000..1d4d1919c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -0,0 +1,20 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.election.lockName'
+ yaml_value: 'openshift-master-controllers'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
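
For reference, a sketch of the `master-config.yaml` fragment these four `modify_yaml` tasks leave behind (keys and values taken directly from the tasks above):

```
controllerConfig:
  election:
    lockName: openshift-master-controllers
  serviceServingCert:
    signer:
      certFile: service-signer.crt
      keyFile: service-signer.key
servingInfo:
  clientCA: ca.crt
```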
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/roles b/playbooks/common/openshift-cluster/upgrades/v3_9/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
new file mode 100644
index 000000000..94c16cae0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -0,0 +1,142 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../init/version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../openshift-master/private/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_9/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
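
Since every pre-upgrade play above is tagged `pre_upgrade`, the validation phase can be exercised on its own before committing to the upgrade; a hedged dry run (inventory path illustrative):

```
ansible-playbook -i ~/ansible-inventory \
  openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml \
  --tags pre_upgrade
```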
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
new file mode 100644
index 000000000..2045f6379
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -0,0 +1,144 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../init/version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../openshift-master/private/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_9/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- include: ../post_control_plane.yml
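
The controller-cycling play above stops the service on every master before starting any of them, so the restarted controllers elect a leader under the new `lockName` written by the master config hook. A hedged per-master equivalent, assuming a `service_type` of `origin`:

```
# run the stop on all masters before any start
systemctl stop origin-master-controllers
systemctl start origin-master-controllers
```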
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
new file mode 100644
index 000000000..6134f8653
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -0,0 +1,115 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.7'
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../init/version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
new file mode 100644
index 000000000..4bd2d87b1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.9 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - debug: msg="noop"
diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml
index 05142f9b6..1166ac538 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/facts.yml
@@ -84,6 +84,8 @@
- "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- yum-utils
+ register: result
+ until: result | success
- name: Ensure various deps for running system containers are installed
package:
@@ -100,6 +102,8 @@
or (openshift_use_openvswitch_system_container | default(False)) | bool
or (openshift_use_node_system_container | default(False)) | bool
or (openshift_use_master_system_container | default(False)) | bool
+ register: result
+ until: result | success
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
diff --git a/playbooks/openshift-node/private/network_manager.yml b/playbooks/openshift-node/private/network_manager.yml
index fffed4dfb..7211787be 100644
--- a/playbooks/openshift-node/private/network_manager.yml
+++ b/playbooks/openshift-node/private/network_manager.yml
@@ -9,6 +9,8 @@
package:
name: 'NetworkManager'
state: present
+ register: result
+ until: result | success
- name: configure NetworkManager
lineinfile: