Diffstat (limited to 'playbooks/common')
l---------  playbooks/common/openshift-cluster/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/library | 1
l---------  playbooks/common/openshift-cluster/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_excluders.yml (renamed from playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml) | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh | 25
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 57
l---------  playbooks/common/openshift-cluster/upgrades/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 2
l---------  playbooks/common/openshift-cluster/upgrades/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 81
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 116
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 58
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 43
l---------  playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 100
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 108
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 101
l---------  playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 102
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 110
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 99
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 5
l---------  playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 104
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 113
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 99
l---------  playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 123
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 191
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 110
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml | 2
-rw-r--r--  playbooks/common/private/components.yml | 38
-rw-r--r--  playbooks/common/private/control_plane.yml | 34
50 files changed, 714 insertions(+), 1509 deletions(-)
diff --git a/playbooks/common/openshift-cluster/filter_plugins b/playbooks/common/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/library b/playbooks/common/openshift-cluster/library
deleted file mode 120000
index d0b7393d3..000000000
--- a/playbooks/common/openshift-cluster/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/lookup_plugins b/playbooks/common/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
index 6e953be69..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -1,22 +1 @@
---
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Remove unused Docker images for Docker 1.10+ migration
- shell: "docker rmi `docker images -aq`"
- # Will fail on images still in use:
- failed_when: false
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 23cf8cf76..6d82fa928 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -2,7 +2,6 @@
- name: Create local temp directory for syncing certs
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
@@ -11,8 +10,15 @@
changed_when: false
when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+ - name: Chmod local temp directory
+ local_action: command chmod 777 "{{ local_cert_sync_tmpdir.stdout }}"
+ changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
- name: Create service signer certificate
hosts: oo_first_master
+ roles:
+ - openshift_facts
tasks:
- name: Create remote temp directory for creating certs
command: mktemp -d /tmp/openshift-ansible-XXXXXXX
@@ -22,7 +28,7 @@
- name: Create service signer certificate
command: >
- {{ openshift.common.client_binary }} adm ca create-signer-cert
+ {{ openshift_client_binary }} adm ca create-signer-cert
--cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt
--key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key
--name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer
@@ -65,7 +71,6 @@
- name: Delete local temp directory
hosts: localhost
connection: local
- become: no
gather_facts: no
tasks:
- name: Delete local temp directory
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
index 33ed6a283..858912379 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
@@ -1,6 +1,6 @@
---
- name: Disable excluders
- hosts: oo_masters_to_config
+ hosts: "{{ l_upgrade_excluder_hosts }}"
gather_facts: no
roles:
- role: openshift_excluder
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
deleted file mode 100644
index ab3171c9a..000000000
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Disable excluders
- hosts: oo_nodes_to_upgrade:!oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_verify_upgrade: true
- r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
- r_openshift_excluder_package_state: latest
- r_openshift_excluder_docker_package_state: latest
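The rename and deletion above fold two fixed-host excluder plays into a single parametrized play: the hosts line now reads l_upgrade_excluder_hosts, so each caller decides whether excluders are disabled on masters, nodes, or both. A minimal sketch of a call site, using the value that v3_6/upgrade.yml passes later in this diff:

    - import_playbook: ../disable_excluders.yml
      vars:
        l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"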
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index fcb828808..8392e21ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -12,14 +12,13 @@
roles:
- openshift_facts
tasks:
- - set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
- fail:
msg: Cannot upgrade Docker on Atomic operating systems.
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- - include_tasks: upgrade_check.yml
+ - import_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
@@ -44,7 +43,7 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
@@ -52,13 +51,19 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
- until: not l_docker_upgrade_drain_result | failed
- retries: 60
- delay: 60
+ until: not (l_docker_upgrade_drain_result is failed)
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_docker_upgrade_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
- include_tasks: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
@@ -71,5 +76,5 @@
retries: 10
delay: 5
register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
+ until: node_schedulable is succeeded
+ when: node_unschedulable is changed
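Two mechanical changes in this file recur throughout the patch. First, checks on registered results move from the deprecated Jinja2 filter spelling to test syntax, which newer Ansible releases (2.5+) require:

    # deprecated filter spelling (removed):
    until: node_unschedulable|succeeded
    # test spelling (added):
    until: node_unschedulable is succeeded

Second, the drain command gains a --timeout flag with inverted retry semantics: with openshift_upgrade_nodes_drain_timeout unset (0), the drain may retry once and a failure aborts the play; with a timeout set, the task runs only once and a timed-out drain is tolerated, since failed_when only triggers when the timeout is 0.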
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
deleted file mode 100644
index 8635eab0d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Stop any running containers
-running_container_ids=`docker ps -q`
-if test -n "$running_container_ids"
-then
- docker stop $running_container_ids
-fi
-
-# Delete all containers
-container_ids=`docker ps -a -q`
-if test -n "$container_ids"
-then
- docker rm -f -v $container_ids
-fi
-
-# Delete all images (forcefully)
-image_ids=`docker images -aq`
-if test -n "$image_ids"
-then
- # Some layers are deleted recursively and are no longer present
- # when docker goes to remove them:
- docker rmi -f `docker images -aq` || true
-fi
-
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index dbc4f39c7..3b47a11e0 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -2,7 +2,7 @@
- name: Restart docker
service: name=docker state=restarted
register: l_docker_restart_docker_in_upgrade_result
- until: not l_docker_restart_docker_in_upgrade_result | failed
+ until: not (l_docker_restart_docker_in_upgrade_result is failed)
retries: 3
delay: 30
@@ -15,7 +15,7 @@
- "{{ openshift_service_type }}-master-controllers"
- "{{ openshift_service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Wait for master API to come back online
wait_for:
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 4856a4b51..54eeb2ef5 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -10,7 +10,7 @@
- etcd_container
- openvswitch
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Check Docker image count
shell: "docker images -aq | wc -l"
@@ -35,14 +35,14 @@
name: docker
state: stopped
register: l_pb_docker_upgrade_stop_result
- until: not l_pb_docker_upgrade_stop_result | failed
+ until: not (l_pb_docker_upgrade_stop_result is failed)
retries: 3
delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
register: result
- until: result | success
+ until: result is succeeded
- include_tasks: restart.yml
when: not skip_docker_restart | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 2e3a7ae8b..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -1,58 +1 @@
---
-
-# This snippet determines if a Docker upgrade is required by checking the inventory
-# variables, the available packages, and sets l_docker_upgrade to True if so.
-
-- set_fact:
- docker_upgrade: True
- when: docker_upgrade is not defined
-
-- name: Check if Docker is installed
- command: rpm -q docker
- args:
- warn: no
- register: pkg_check
- failed_when: pkg_check.rc > 1
- changed_when: no
-
-- name: Get current version of Docker
- command: "{{ repoquery_installed }} --qf '%{version}' docker"
- register: curr_docker_version
- retries: 4
- until: curr_docker_version | succeeded
- changed_when: false
-
-- name: Get latest available version of Docker
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "docker"
- register: avail_docker_version
- retries: 4
- until: avail_docker_version | succeeded
- # Don't expect docker rpm to be available on hosts that don't already have it installed:
- when: pkg_check.rc == 0
- failed_when: false
- changed_when: false
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- # Disable the 1.12 requirement if the user set a specific Docker version
- when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
-
-# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
-- set_fact:
- l_docker_upgrade: False
-
-# Make sure a docker_version is set if none was requested:
-- set_fact:
- docker_version: "{{ avail_docker_version.stdout }}"
- when: pkg_check.rc == 0 and docker_version is not defined
-
-- name: Flag for Docker upgrade if necessary
- set_fact:
- l_docker_upgrade: True
- when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
-
-- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
- set_fact:
- docker_upgrade_nuke_images: True
- when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
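The deleted check file also illustrates the second syntax migration in this patch: version comparisons move from the version_compare filter to the equivalent Jinja2 test, the spelling used by the new verify_cluster.yml later in this diff. A before/after sketch using the registered variable from the deleted task:

    # old filter spelling (as deleted above):
    when: curr_docker_version.stdout | version_compare('1.12', '<')
    # test spelling used by the replacement checks:
    when: curr_docker_version.stdout is version_compare('1.12', '<')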
diff --git a/playbooks/common/openshift-cluster/upgrades/filter_plugins b/playbooks/common/openshift-cluster/upgrades/filter_plugins
deleted file mode 120000
index b1213dedb..000000000
--- a/playbooks/common/openshift-cluster/upgrades/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 5454a6680..ba783638d 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -5,10 +5,11 @@
g_new_master_hosts: []
g_new_node_hosts: []
-- import_playbook: ../../../init/facts.yml
+- import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/cluster_facts.yml
- name: Ensure firewall is not switched during upgrade
- hosts: oo_all_hosts
+ hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
vars:
openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
tasks:
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index fc1cbf32a..07be0b0d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -31,7 +31,7 @@
with_items: " {{ groups['oo_nodes_to_config'] }}"
when:
- hostvars[item].openshift is defined
- - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+ - hostvars[item].openshift.common.hostname | lower in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
diff --git a/playbooks/common/openshift-cluster/upgrades/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
deleted file mode 120000
index aff753026..000000000
--- a/playbooks/common/openshift-cluster/upgrades/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 344ddea3c..de612da21 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -1,7 +1,15 @@
---
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
+####################################################################################
+# Post upgrade - Upgrade web console, default router, default registry, and examples
+####################################################################################
+- name: Upgrade web console
+ hosts: oo_first_master
+ roles:
+ - role: openshift_web_console
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift_upgrade_target is version_compare('3.9','>=')
+
- name: Upgrade default router and default registry
hosts: oo_first_master
vars:
@@ -27,8 +35,8 @@
- set_fact:
haproxy_routers: "{{ all_routers.results.results[0]['items'] |
- oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
- oo_select_keys_from_list(['metadata']) }}"
+ lib_utils_oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
+ lib_utils_oo_select_keys_from_list(['metadata']) }}"
when:
- all_routers.results.returncode == 0
@@ -126,7 +134,7 @@
debug:
msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
when:
- - not grep_plugin_order_override | skipped
+ - not (grep_plugin_order_override is skipped)
- grep_plugin_order_override.rc == 0
- name: Warn if shared-resource-viewer could not be updated
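The new web console play runs only when the install flag (which defaults to true) is set and the upgrade target is 3.9 or newer. A sketch of opting out, using the variable exactly as gated above; where the variable is set (inventory group vars, extra vars) is up to the operator:

    # skip the web console upgrade play
    openshift_web_console_install: false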
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
new file mode 100644
index 000000000..2b27f8dd0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -0,0 +1,81 @@
+---
+# for control-plane upgrade, several variables may be passed in to this play
+# which may affect the tasks here and in imported playbooks.
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+
+- import_playbook: verify_cluster.yml
+
+- name: Update repos on upgrade hosts
+ hosts: "{{ l_upgrade_repo_hosts }}"
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: "{{ l_upgrade_no_proxy_hosts }}"
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- name: OpenShift Health Checks
+ hosts: "{{ l_upgrade_health_check_hosts }}"
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: upgrade
+ post_tasks:
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - docker_image_availability
+
+- import_playbook: ../disable_excluders.yml
+
+- import_playbook: ../../../../init/version.yml
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+  # openshift_protect_installed_version is passed in via upgrade_control_plane.yml
+ # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
+ # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
+
+# If we're only upgrading nodes, we need to ensure masters are already upgraded
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when:
+ - l_upgrade_nodes_only | default(False) | bool
+ - openshift.common.version != openshift_version
+
+# If we're only upgrading nodes, skip this.
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+ when: not (l_upgrade_nodes_only | default(False)) | bool
+
+- name: Verify upgrade targets
+ hosts: "{{ l_upgrade_verify_targets_hosts }}"
+ roles:
+ - role: openshift_facts
+ tasks:
+ - include_tasks: verify_upgrade_targets.yml
+
+- name: Verify docker upgrade targets
+ hosts: "{{ l_upgrade_docker_target_hosts }}"
+ tasks:
+ - import_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
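pre/config.yml collapses the per-version pre-upgrade playbooks into one file and leaves all host selection to the caller through the l_upgrade_* variables. A sketch of a call site, matching the values v3_6/upgrade.yml passes later in this diff:

    - import_playbook: ../pre/config.yml
      vars:
        l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
        l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
        l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
        l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
        l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
        l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
        openshift_protect_installed_version: False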
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
deleted file mode 100644
index 8ecae4539..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Flag pre-upgrade checks complete for hosts without errors
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - set_fact:
- pre_upgrade_complete: True
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
deleted file mode 100644
index 18a08eb99..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Only check if docker upgrade is required if docker_upgrade is not
-# already set to False.
-- include_tasks: ../../docker/upgrade_check.yml
- when:
- - docker_upgrade is not defined or (docker_upgrade | bool)
- - not (openshift.common.is_atomic | bool)
-
-# Additional checks for Atomic hosts:
-
-- name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
-
-- set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
new file mode 100644
index 000000000..5ee8a9d78
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -0,0 +1,116 @@
+---
+# Verify a few items before we proceed with upgrade process.
+
+- name: Verify upgrade can proceed on first master
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+
+ # Error out in situations where the user has older versions specified in their
+ # inventory in any of the openshift_release, openshift_image_tag, and
+ # openshift_pkg_version variables. These must be removed or updated to proceed
+ # with upgrade.
+ # TODO: Should we block if you're *over* the next major release version as well?
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when:
+ - openshift_pkg_version is defined
+ - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when:
+ - openshift_image_tag is defined
+ - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
+
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ openshift_upgrade_target }} upgrade
+ when:
+ - openshift_release is defined
+ - not (openshift_release is version_compare(openshift_upgrade_target ,'='))
+
+- name: Verify master processes
+ hosts: oo_masters_to_config
+ roles:
+ - lib_utils
+ - openshift_facts
+ tasks:
+ - name: Read master storage backend setting
+ yedit:
+ state: list
+ src: /etc/origin/master/master-config.yaml
+ key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ register: _storage_backend
+
+ - fail:
+ msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+ when:
+    # assuming the master-config.yaml is properly configured, i.e. the value is a list
+ - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+ - debug:
+ msg: "Storage backend is set to etcd3"
+
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+ - when: openshift_is_containerized | bool
+ block:
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master"
+
+ # In case of the non-ha to ha upgrade.
+ - name: Check if the {{ openshift_service_type }}-master-api.service exists
+ command: >
+ systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
+ register: master_api_service_status
+
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ when:
+ - master_api_service_status.stdout_lines | length > 0
+ - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items: "{{ master_services }}"
+
+# Until openshift-ansible is determining which host is the CA host we
+# must (unfortunately) ensure that the first host in the etcd group is
+# the etcd CA host.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1469358
+- name: Verify we can proceed on first etcd
+ hosts: oo_first_etcd
+ gather_facts: no
+ tasks:
+ - name: Ensure CA exists on first etcd
+ stat:
+ path: /etc/etcd/generated_certs
+ register: __etcd_ca_stat
+
+ - fail:
+ msg: >
+ In order to correct an etcd certificate signing problem
+ upgrading may require re-generating etcd certificates. Please
+ ensure that the /etc/etcd/generated_certs directory exists on
+ the first host defined in your [etcd] group.
+ when:
+ - not __etcd_ca_stat.stat.exists | bool
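The storage-backend gate in this play reads kubernetesMasterConfig.apiServerArguments.storage-backend via yedit and requires a list whose first entry is "etcd3". A sketch of the master-config.yaml fragment that satisfies the check:

    kubernetesMasterConfig:
      apiServerArguments:
        storage-backend:
        - etcd3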
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
deleted file mode 100644
index bef95546d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify master processes
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
- - when: openshift.common.is_containerized | bool
- block:
- - set_fact:
- master_services:
- - "{{ openshift_service_type }}-master"
-
- # In case of the non-ha to ha upgrade.
- - name: Check if the {{ openshift_service_type }}-master-api.service exists
- command: >
- systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
- register: master_api_service_status
-
- - set_fact:
- master_services:
- - "{{ openshift_service_type }}-master-api"
- - "{{ openshift_service_type }}-master-controllers"
- when:
- - master_api_service_status.stdout_lines | length > 0
- - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
-
- - name: Ensure Master is running
- service:
- name: "{{ item }}"
- state: started
- enabled: yes
- with_items: "{{ master_services }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
deleted file mode 100644
index f75ae3b15..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Verify all masters has etcd3 storage backend set
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - lib_utils
- tasks:
- - name: Read master storage backend setting
- yedit:
- state: list
- src: /etc/origin/master/master-config.yaml
- key: kubernetesMasterConfig.apiServerArguments.storage-backend
- register: _storage_backend
-
- - fail:
- msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
- when:
- # assuming the master-config.yml is properly configured, i.e. the value is a list
- - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
-
- - debug:
- msg: "Storage backend is set to etcd3"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
deleted file mode 100644
index 2a8de50a2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: OpenShift Health Checks
- hosts: oo_all_hosts
- any_errors_fatal: true
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: upgrade
- post_tasks:
- - name: Run health checks (upgrade)
- action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - docker_image_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
deleted file mode 100644
index 3c0017891..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify upgrade can proceed on first master
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - fail:
- msg: >
- This upgrade is only supported for origin and openshift-enterprise
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise']
-
- # Error out in situations where the user has older versions specified in their
- # inventory in any of the openshift_release, openshift_image_tag, and
- # openshift_pkg_version variables. These must be removed or updated to proceed
- # with upgrade.
- # TODO: Should we block if you're *over* the next major release version as well?
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - fail:
- msg: >
- openshift_image_tag is {{ openshift_image_tag }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when: openshift_release is defined and openshift_release[0] == 'v'
-
- - fail:
- msg: >
- openshift_release is {{ openshift_release }} which is not a
- valid release for a {{ openshift_upgrade_target }} upgrade
- when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 96f970506..45ddf7eea 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -5,7 +5,7 @@
when: openshift.common.version is not defined
- name: Update oreg_auth docker login credentials if necessary
- include_role:
+ import_role:
name: container_runtime
tasks_from: registry_auth.yml
when: oreg_auth_user is defined
@@ -15,13 +15,13 @@
docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
-- when: not openshift.common.is_containerized | bool
+- when: not openshift_is_containerized | bool
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift_service_type }}"
+ name: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
ignore_excluders: true
register: repoquery_out
@@ -43,11 +43,11 @@
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<')
+ - (openshift_pkg_version | default('-0.0', True)).split('-')[1] is version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
fail:
msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
when:
- - deployment_type == 'origin'
- - openshift.common.version | version_compare(openshift_upgrade_min,'<')
+ - openshift_deployment_type == 'origin'
+ - openshift.common.version is version_compare(openshift_upgrade_min,'<')
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 37fc8a0f6..a10fd4bee 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -3,18 +3,28 @@
# Upgrade Masters
###############################################################################
-# If facts cache were for some reason deleted, this fact may not be set, and if not set
-# it will always default to true. This causes problems for the etcd data dir fact detection
-# so we must first make sure this is set correctly before attempting the backup.
-- name: Set master embedded_etcd fact
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
+# Prior to 3.6, openshift-ansible created etcd serving certificates
+# without a SubjectAlternativeName entry for the system hostname. The
+# SAN list in Go 1.8 is now (correctly) authoritative and since
+# openshift-ansible configures masters to talk to etcd hostnames
+# rather than IP addresses, we must correct etcd certificates.
+#
+# This play examines the etcd serving certificate SANs on each etcd
+# host and records whether or not the system hostname is missing.
+- name: Examine etcd serving certificate SAN
+ hosts: oo_etcd_to_config
tasks:
- - openshift_facts:
- role: master
- local_facts:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ - slurp:
+ src: /etc/etcd/server.crt
+ register: etcd_serving_cert
+ - set_fact:
+ __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"
+
+# Redeploy etcd certificates when hostnames were missing from etcd
+# serving certificate SANs.
+- import_playbook: ../../../openshift-etcd/redeploy-certificates.yml
+ when:
+ - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
- name: Backup and upgrade etcd
import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
@@ -36,10 +46,12 @@
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
- name: Pre master upgrade - Upgrade all storage
hosts: oo_first_master
+ roles:
+ - openshift_facts
tasks:
- name: Upgrade all storage
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=* --confirm
register: l_pb_upgrade_control_plane_pre_upgrade_storage
when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
@@ -63,10 +75,9 @@
vars:
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
+ roles:
+ - openshift_facts
tasks:
- - include_role:
- name: openshift_facts
-
# Run the pre-upgrade hook if defined:
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
@@ -74,7 +85,7 @@
- include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include_role:
+ - import_role:
name: openshift_master
tasks_from: upgrade.yml
@@ -100,12 +111,12 @@
- name: Post master upgrade - Upgrade clusterpolicies storage
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=clusterpolicies --confirm
register: l_pb_upgrade_control_plane_post_upgrade_storage
when:
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- - openshift_version | version_compare('3.7','<')
+ - openshift_version is version_compare('3.7','<')
failed_when:
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
@@ -122,12 +133,11 @@
- name: Gate on master update
hosts: localhost
connection: local
- become: no
tasks:
- set_fact:
master_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+ | lib_utils_oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
- set_fact:
master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) | list }}"
- fail:
@@ -142,15 +152,16 @@
hosts: oo_masters_to_config
roles:
- { role: openshift_cli }
+ - { role: openshift_facts }
vars:
__master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
- when: openshift_version | version_compare('3.7','<')
+ when: openshift_version is version_compare('3.7','<')
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -158,14 +169,14 @@
- name: Reconcile Cluster Role Bindings
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-role-bindings
--exclude-groups=system:authenticated
--exclude-groups=system:authenticated:oauth
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: openshift_version | version_compare('3.7','<')
+ when: openshift_version is version_compare('3.7','<')
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -174,16 +185,16 @@
- name: Reconcile Jenkins Pipeline Role Bindings
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
run_once: true
register: reconcile_jenkins_role_binding_result
changed_when:
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
when:
- - openshift_version | version_compare('3.7','<')
+ - openshift_version is version_compare('3.7','<')
- - when: openshift_upgrade_target | version_compare('3.7','<')
+ - when: openshift_upgrade_target is version_compare('3.7','<')
block:
- name: Retrieve shared-resource-viewer
oc_obj:
@@ -228,7 +239,7 @@
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
+ {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
register: reconcile_scc_result
changed_when:
- reconcile_scc_result.stdout != ''
@@ -237,7 +248,7 @@
- name: Migrate storage post policy reconciliation
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=* --confirm
run_once: true
register: l_pb_upgrade_control_plane_post_upgrade_storage
@@ -256,12 +267,11 @@
- name: Gate on reconcile
hosts: localhost
connection: local
- become: no
tasks:
- set_fact:
reconcile_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+ | lib_utils_oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
- set_fact:
reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) | list }}"
- fail:
@@ -276,7 +286,7 @@
- openshift_facts
tasks:
- include_tasks: docker/tasks/upgrade.yml
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift_is_atomic | bool
- name: Drain and upgrade master nodes
hosts: oo_masters_to_config:&oo_nodes_to_upgrade
@@ -301,32 +311,31 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
- until: not l_upgrade_control_plane_drain_result | failed
- retries: 60
- delay: 60
+ until: not (l_upgrade_control_plane_drain_result is failed)
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_control_plane_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
roles:
- openshift_facts
post_tasks:
- - include_role:
+ - import_role:
name: openshift_node
tasks_from: upgrade.yml
+ - import_role:
+ name: openshift_manage_node
+ tasks_from: config.yml
vars:
- openshift_node_upgrade_in_progress: True
- - name: Set node schedulability
- oc_adm_manage_node:
- node: "{{ openshift.node.nodename | lower }}"
- schedulable: True
- delegate_to: "{{ groups.oo_first_master.0 }}"
- retries: 10
- delay: 5
- register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
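The etcd SAN remediation added at the top of this file is a two-phase fan-in: each etcd host records a boolean fact derived from its own serving certificate, then a single play-level decision fires if any host flagged a problem. A condensed sketch of the idiom (lib_utils_oo_parse_certificate_san, lib_utils_oo_select_keys, and lib_utils_oo_collect are this repository's custom filter plugins, used as in the diff):

    - name: Examine etcd serving certificate SAN
      hosts: oo_etcd_to_config
      tasks:
      - slurp:
          src: /etc/etcd/server.crt
        register: etcd_serving_cert
      # true when the cert's SAN list does not cover this host's name
      - set_fact:
          __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in
            (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"

    # fan-in: redeploy certificates when any etcd host set the flag
    - import_playbook: ../../../openshift-etcd/redeploy-certificates.yml
      when:
      - true in (hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config'])
        | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]))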
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index f7a85545b..915fae9fd 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,16 +1,23 @@
---
+- name: Prepull images and rpms before doing rolling restart
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ roles:
+ - role: openshift_facts
+ tasks:
+ - import_role:
+ name: openshift_node
+ tasks_from: upgrade_pre.yml
+
- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
+ roles:
+ - lib_openshift
+ - openshift_facts
pre_tasks:
- - name: Load lib_openshift modules
- import_role:
- name: lib_openshift
-
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
@@ -22,36 +29,37 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
- until: not l_upgrade_nodes_drain_result | failed
- retries: 60
- delay: 60
+ until: not (l_upgrade_nodes_drain_result is failed)
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
- roles:
- - openshift_facts
post_tasks:
- - include_role:
+ - import_role:
name: openshift_node
tasks_from: upgrade.yml
+ - import_role:
+ name: openshift_manage_node
+ tasks_from: config.yml
vars:
- openshift_node_upgrade_in_progress: True
- - include_role:
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
+
+- name: Re-enable excluders
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ tasks:
+ - import_role:
name: openshift_excluder
vars:
r_openshift_excluder_action: enable
- - name: Set node schedulability
- oc_adm_manage_node:
- node: "{{ openshift.node.nodename | lower }}"
- schedulable: True
- delegate_to: "{{ groups.oo_first_master.0 }}"
- retries: 10
- delay: 5
- register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
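The rolling node upgrade above is tuned entirely through extra vars, since serial and max_fail_percentage are evaluated before per-host inventory vars are loaded. A sketch of the knobs with illustrative values (the play's defaults are serial 1, max fail 0%, drain timeout 0):

    # pass with -e on the ansible-playbook invocation:
    openshift_upgrade_nodes_serial: "20%"             # batch size; a count or a percentage
    openshift_upgrade_nodes_max_fail_percentage: 10   # per-batch abort threshold
    openshift_upgrade_nodes_drain_timeout: 600        # seconds; when set, a timed-out
                                                      # drain no longer fails the play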
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 47410dff3..e259b5d09 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -3,7 +3,7 @@
hosts: localhost
tasks:
- name: build upgrade scale groups
- include_role:
+ import_role:
name: openshift_aws
tasks_from: upgrade_node_group.yml
@@ -11,25 +11,19 @@
msg: "Ensure that new scale groups were provisioned before proceeding to update."
when:
- "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+ - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
+ - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
- name: initialize upgrade bits
import_playbook: init.yml
-- name: Drain and upgrade nodes
+- name: unschedule nodes
hosts: oo_sg_current_nodes
- # This var must be set with -e on invocation, as it is not a per-host inventory var
- # and is evaluated early. Values such as "20%" can also be used.
- serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
- pre_tasks:
+ tasks:
- name: Load lib_openshift modules
- include_role:
+ import_role:
name: ../roles/lib_openshift
- # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
- # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
- # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -38,22 +32,35 @@
retries: 10
delay: 5
register: node_unschedulable
- until: node_unschedulable|succeeded
+ until: node_unschedulable is succeeded
+- name: Drain nodes
+ hosts: oo_sg_current_nodes
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+ tasks:
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --force --delete-local-data --ignore-daemonsets
+ --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
- until: not l_upgrade_nodes_drain_result | failed
- retries: 60
- delay: 60
+ until: not (l_upgrade_nodes_drain_result is failed)
+ retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+ delay: 5
+ failed_when:
+ - l_upgrade_nodes_drain_result is failed
+ - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
# Alright, let's clean up!
- name: clean up the old scale group
hosts: localhost
tasks:
- name: clean up scale group
- include_role:
+ import_role:
name: openshift_aws
tasks_from: remove_scale_group.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 9f9399ff9..a2d21b69f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -13,101 +13,27 @@
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
openshift_protect_installed_version: False
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
vars:
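
Editorial note: the version-specific playbook now only sets its upgrade targets and imports the shared ../pre/config.yml, steering it entirely through the `l_upgrade_*` host-pattern variables. A sketch of how such a shared playbook might consume them (the task file name here is an assumption):

    ---
    # Sketch: a shared pre-upgrade playbook parameterized by host patterns.
    - name: Update repos on whichever hosts the caller selected
      hosts: "{{ l_upgrade_repo_hosts }}"
      roles:
      - openshift_repos

    - name: Verify upgrade targets on the caller-selected hosts
      hosts: "{{ l_upgrade_verify_targets_hosts }}"
      tasks:
      - include_tasks: verify_upgrade_targets.yml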
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 7374160d6..9aa5a3b64 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -12,106 +12,40 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
tasks:
- set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise
+  # include them by default.
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
openshift_protect_installed_version: False
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
vars:
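
Editorial note: the control-plane variant passes patterns such as `oo_masters_to_config:!oo_first_master`, relying on Ansible's host-pattern algebra: `:` unions groups and `:!` subtracts one. For illustration:

    ---
    # Illustration of the exclusion pattern (the debug task is just for show).
    - name: Run on every master except the first one
      hosts: "oo_masters_to_config:!oo_first_master"
      gather_facts: false
      tasks:
      - debug:
          msg: "{{ inventory_hostname }} is a non-first master"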
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index de9bf098e..4febe76ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -15,95 +15,24 @@
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
- import_playbook: ../upgrade_nodes.yml
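
Editorial note: on the node-only path, `l_upgrade_nodes_only: True` lets the shared pre-upgrade playbook skip work that only matters when the control plane is upgraded in the same run. A hedged sketch of that kind of guard inside the shared playbook (play structure and task file name assumed):

    ---
    # Sketch: a control-plane-only check guarded off on the nodes-only path.
    - name: Validate master restart options
      hosts: oo_masters_to_config
      tasks:
      - include_tasks: validate_restart.yml
        when: not l_upgrade_nodes_only | default(False) | bool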
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 9ec788e76..cc2ec2709 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -15,103 +15,25 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
openshift_protect_installed_version: False
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index ad67b6c44..b1ecc75d3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -12,110 +12,40 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise
+  # include them by default.
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
openshift_protect_installed_version: False
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index 27a7f67ea..16d95514c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -17,93 +17,22 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 74d0cd8ad..9c7688981 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -7,6 +7,7 @@
hosts: oo_first_master
roles:
- { role: lib_openshift }
+ - { role: openshift_facts }
tasks:
- name: Check for invalid namespaces and SDN errors
@@ -14,9 +15,9 @@
# DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO
- name: Confirm OpenShift authorization objects are in sync
command: >
- {{ openshift.common.client_binary }} adm migrate authorization
+ {{ openshift_client_binary }} adm migrate authorization
when:
- - openshift_currently_installed_version | version_compare('3.7','<')
+ - openshift_currently_installed_version is version_compare('3.7','<')
- openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool
changed_when: false
register: l_oc_result
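
Editorial note: the validator change swaps the deprecated filter-style invocation of a Jinja2 test for the `is` test syntax required by newer Ansible releases; the comparison itself is unchanged. A compact fragment showing both spellings:

    ---
    # Fragment: the same version condition in both spellings.
    - name: Run only on clusters older than 3.7
      command: /bin/true
      # Deprecated filter spelling:
      #   openshift_currently_installed_version | version_compare('3.7', '<')
      when: openshift_currently_installed_version is version_compare('3.7', '<')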
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index 60ec79df5..a73b7d63a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -15,107 +15,27 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
openshift_protect_installed_version: False
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index c1a3f64f2..723b2e533 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -12,114 +12,43 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ when: not skip_version_info | default(false)
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise
+  # include them by default.
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
openshift_protect_installed_version: False
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
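
Editorial note: the `when: not skip_version_info | default(false)` guard added to the ../init.yml import earlier in this hunk is applied to every play the imported file contains, so a single extra var can bypass the whole initialization pass. A sketch of the mechanism:

    ---
    # Sketch: skipping an imported playbook wholesale with an extra var,
    # e.g. run with: ansible-playbook <playbook> -e skip_version_info=true
    - import_playbook: ../init.yml
      when: not skip_version_info | default(false)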
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index dd716b241..b5f1038fd 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -17,93 +17,22 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
index 1d4d1919c..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -1,20 +1 @@
---
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.election.lockName'
- yaml_value: 'openshift-master-controllers'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: servingInfo.clientCA
- yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 1e704b66c..bf6e8605e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -3,125 +3,36 @@
# Full Control Plane + Nodes Upgrade
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_all_hosts
- tags:
- - pre_upgrade
tasks:
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.9'
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
openshift_protect_installed_version: False
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - import_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
@@ -130,13 +41,13 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
- import_playbook: ../upgrade_nodes.yml
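
Editorial note: the stop/start pair above runs as two separate tasks in one unserialized play, which is what satisfies the "all controllers must be stopped at the same time" requirement: the default linear strategy finishes the stop task on every master before any master reaches the start task, so all controllers rejoin under the new leader-election mode together. A compact sketch of that fleet-wide barrier (service name taken from the diff):

    ---
    # Sketch: the linear strategy runs each task to completion across all
    # hosts before moving on, giving a fleet-wide stop barrier.
    - hosts: oo_masters_to_config
      strategy: linear
      tasks:
      - systemd:
          name: "{{ openshift_service_type }}-master-controllers"
          state: stopped
      - systemd:
          name: "{{ openshift_service_type }}-master-controllers"
          state: started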
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index a9689da1f..fe1fdefff 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -12,120 +12,109 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+## Check whether the hosts are running 3.7 and, if so, upgrade the control plane to 3.8 first.
+## If pkg_version or image_tag was specified, preserve it for later use.
+- name: Configure the upgrade target for the common upgrade tasks 3.8
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
- openshift_upgrade_target: '3.9'
+ openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.8'
+ _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+ _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+ l_double_upgrade_cp: True
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+
+ - name: set l_force_image_tag_to_version = True
+ set_fact:
+ # Need to set this during 3.8 upgrade to ensure image_tag is set correctly
+ # to match 3.8 version
+ l_force_image_tag_to_version: True
+ when: _requested_image_tag is defined
+
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise
+  # include them by default.
+ vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
+- name: Flag pre-upgrade checks complete for hosts without errors 3.8
+ hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
+ pre_upgrade_complete: True
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
+# Pre-upgrade completed
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../upgrade_control_plane.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
+ openshift_release: '3.8'
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+## 3.8 upgrade complete we should now be able to upgrade to 3.9
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
+- name: Configure the upgrade target for the common upgrade tasks 3.9
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- import_playbook: validator.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
+ - meta: clear_facts
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.8'
+ openshift_release: '3.9'
+ openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
+  # Set the user-specified image_tag for the 3.9 upgrade if it was provided.
+ - set_fact:
+ openshift_image_tag: "{{ _requested_image_tag }}"
+ l_force_image_tag_to_version: False
+ when: _requested_image_tag is defined
+  # If the user didn't specify an image_tag, we need to force an image_tag
+  # update, because it will already have been set during the 3.8 pass. If we
+  # aren't running a double upgrade, we can preserve image_tag, since it will
+  # still be the user-provided value.
+ - set_fact:
+ l_force_image_tag_to_version: True
+ when:
+ - l_double_upgrade_cp is defined and l_double_upgrade_cp
+ - _requested_image_tag is not defined
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise
+  # include them by default.
+ vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
- import_playbook: ../upgrade_control_plane.yml
vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
+ openshift_release: '3.9'
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
@@ -134,13 +123,19 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
- import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+ tasks:
+ - import_role:
+ name: openshift_web_console
+ tasks_from: remove_old_asset_config
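
Editorial note: this file now performs a chained 3.7 to 3.8 to 3.9 control-plane upgrade in a single run. Every 3.8 play carries the same first-master version gate, and `meta: clear_facts` between the passes drops the cached 3.8 facts before the 3.9 targets are set. The gate itself reduces to a sketch like the following (condition as written in the patch; the wrapper playbook name is hypothetical):

    ---
    # Sketch of the double-upgrade gate: every 3.8 play is skipped when the
    # cluster is already at or past 3.8.
    - import_playbook: upgrade_cp_to_38.yml
      when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8', '<')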
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index d95cfa4e1..859b1d88b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -5,111 +5,31 @@
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_all_hosts
- tags:
- - pre_upgrade
tasks:
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.9'
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
- import_playbook: ../upgrade_nodes.yml
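
The rewrite above collapses a hundred-odd lines of copy-pasted pre-upgrade plays into one parameterized import of ../pre/config.yml driven by the l_upgrade_* group names, then marks surviving hosts with a pre_upgrade_complete fact. A minimal sketch of both halves of that pattern follows; the variable names are taken from the diff, but the play bodies are illustrative and are not the actual contents of pre/config.yml or upgrade_nodes.yml.

---
# Sketch: a shared pre-upgrade playbook targets whatever groups the caller
# passes in, so each version-specific entry point shrinks to a handful of vars.
- name: Update repos on upgrade hosts
  hosts: "{{ l_upgrade_repo_hosts }}"
  roles:
  - openshift_repos

- name: Verify upgrade targets
  hosts: "{{ l_upgrade_verify_targets_hosts }}"
  tasks:
  - include_tasks: verify_upgrade_targets.yml

# Hypothetical gate, not part of this commit: a later play can refuse hosts
# that never set the completion flag.
- name: Gate the node upgrade on the pre-upgrade flag
  hosts: oo_nodes_to_upgrade
  tasks:
  - fail:
      msg: "Pre-upgrade checks did not complete on {{ inventory_hostname }}."
    when: not (pre_upgrade_complete | default(False) | bool)
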
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
index 4bd2d87b1..d8540abfb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -1,5 +1,5 @@
---
-- name: Verify 3.9 specific upgrade checks
+- name: Verify 3.8 specific upgrade checks
hosts: oo_first_master
roles:
- { role: lib_openshift }
diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml
new file mode 100644
index 000000000..089645d07
--- /dev/null
+++ b/playbooks/common/private/components.yml
@@ -0,0 +1,38 @@
+---
+# These are the core component plays that configure the layers above the control
+# plane. A component is generally any part of OpenShift that runs on top of the
+# cluster and can be considered optional. Over time, much of OpenShift above the
+# Kubernetes apiserver and masters may be treated as components.
+#
+# Preconditions:
+#
+# 1. The control plane is configured and reachable from nodes inside the cluster
+# 2. An admin kubeconfig file in /etc/origin/master/admin.kubeconfig that grants
+# root-level (cluster-admin) access to the cluster
+# 3. On cloud providers, persistent volume provisioners are configured
+# 4. A subset of nodes is available for components to be scheduled onto - this
+# must include the masters and usually includes infra nodes.
+# 5. The init/main.yml playbook has been invoked
+
+- import_playbook: ../../openshift-glusterfs/private/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-hosted/private/config.yml
+
+- import_playbook: ../../openshift-web-console/private/config.yml
+ when: openshift_web_console_install | default(true) | bool
+
+- import_playbook: ../../openshift-metrics/private/config.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: ../../openshift-logging/private/config.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: ../../openshift-prometheus/private/config.yml
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: ../../openshift-service-catalog/private/config.yml
+ when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: ../../openshift-management/private/config.yml
+ when: openshift_management_install_management | default(false) | bool
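
Each optional component above is wired in the same way: a static import_playbook guarded by a defaulted inventory variable. Because import_playbook is processed at parse time, Ansible applies the when: condition to every play inside the imported file rather than skipping the import as a unit. A sketch of the pattern with a hypothetical component path and variable (neither exists in the repository):

---
# Hypothetical: an additional opt-in component following the same guard.
- import_playbook: ../../openshift-example/private/config.yml
  when: openshift_example_install | default(false) | bool

Note the two defaults in play above: opt-out components such as the web console and service catalog default to true, while opt-in components such as metrics, logging, Prometheus, and management default to false and must be enabled in the inventory.
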
diff --git a/playbooks/common/private/control_plane.yml b/playbooks/common/private/control_plane.yml
new file mode 100644
index 000000000..0a5f1142b
--- /dev/null
+++ b/playbooks/common/private/control_plane.yml
@@ -0,0 +1,34 @@
+---
+# These are the control plane plays that configure a control plane on top of hosts
+# identified as masters. Over time, some of the pieces of the current control plane
+# may be moved to the components list.
+#
+# No nodes need to be configured, or passed in for configuration, when this
+# playbook is invoked.
+#
+# Preconditions:
+#
+# 1. A set of machines have been identified to act as masters
+# 2. On cloud providers, a load balancer has been configured to point to the masters
+# and that load balancer has a DNS name
+# 3. The init/main.yml playbook has been invoked
+#
+# Postconditions:
+#
+# 1. The control plane is reachable from outside the cluster
+# 2. The master has an /etc/origin/master/admin.kubeconfig file that gives cluster-admin
+# access.
+
+- import_playbook: ../../openshift-checks/private/install.yml
+
+- import_playbook: ../../openshift-etcd/private/config.yml
+
+- import_playbook: ../../openshift-nfs/private/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-loadbalancer/private/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-master/private/config.yml
+
+- import_playbook: ../../openshift-master/private/additional_config.yml
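
The postconditions stated in the header are directly checkable. A hedged verification play, written here as an illustration rather than anything this commit ships, assuming the usual oo_first_master group and the oc client on that host:

---
# Illustrative only: confirm the control plane postconditions after
# control_plane.yml has run.
- name: Verify control plane postconditions
  hosts: oo_first_master
  tasks:
  - name: Check that the admin kubeconfig was written
    stat:
      path: /etc/origin/master/admin.kubeconfig
    register: admin_kubeconfig

  - name: Fail if the admin kubeconfig is missing
    fail:
      msg: "admin.kubeconfig was not created by the control plane plays"
    when: not admin_kubeconfig.stat.exists

  - name: Confirm the API is reachable with cluster-admin credentials
    command: oc get nodes --config=/etc/origin/master/admin.kubeconfig
    changed_when: false
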