Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-checks/install.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/cockpit-ui.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/create_persistent_volumes.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 190
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 169
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_repos.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/registry.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/sanity_checks.yml | 51
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml | 46
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 38
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 99
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml | 173
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 66
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml | 20
l---------  playbooks/common/openshift-cluster/upgrades/v3_3/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 113
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml | 10
l---------  playbooks/common/openshift-cluster/upgrades/v3_4/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 116
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml | 67
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 8
l---------  playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins (renamed from playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml) | 0
l---------  playbooks/common/openshift-cluster/upgrades/v3_8/roles (renamed from playbooks/common/openshift-cluster/upgrades/v3_5/roles) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml) | 30
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml) | 30
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml) | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/validate_hostnames.yml | 23
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 16
-rw-r--r--  playbooks/common/openshift-glusterfs/config.yml | 16
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 16
-rw-r--r--  playbooks/common/openshift-management/config.yml | 16
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml | 18
-rw-r--r--  playbooks/common/openshift-master/config.yml | 22
-rw-r--r--  playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js | 2
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 24
-rw-r--r--  playbooks/common/openshift-master/revert-client-ca.yml | 17
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 11
-rw-r--r--  playbooks/common/openshift-master/tasks/wire_aggregator.yml | 29
-rw-r--r--  playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js | 1
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 16
-rw-r--r--  playbooks/common/openshift-node/config.yml | 16
-rw-r--r--  playbooks/common/openshift-node/image_prep.yml | 9
-rw-r--r--  playbooks/common/openshift-node/network_manager.yml | 2
68 files changed, 318 insertions(+), 1863 deletions(-)
diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/common/openshift-checks/install.yml
index 6701a2e15..93cf6c359 100644
--- a/playbooks/common/openshift-checks/install.yml
+++ b/playbooks/common/openshift-checks/install.yml
@@ -1,13 +1,15 @@
---
- name: Health Check Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Health Check 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_health: "In Progress"
- aggregate: false
+ installer_phase_health:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: OpenShift Health Checks
hosts: oo_all_hosts
@@ -37,11 +39,13 @@
- docker_image_availability
- name: Health Check Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Health Check 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_health: "Complete"
- aggregate: false
+ installer_phase_health:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml
index 5ddafdb07..359132dd0 100644
--- a/playbooks/common/openshift-cluster/cockpit-ui.yml
+++ b/playbooks/common/openshift-cluster/cockpit-ui.yml
@@ -3,4 +3,6 @@
hosts: oo_first_master
roles:
- role: cockpit-ui
- when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+ when:
+ - openshift_hosted_manage_registry | default(true) | bool
+ - not openshift.docker.hosted_registry_insecure | default(false) | bool
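
List entries under "when" are implicitly AND-ed, so the rewrite above is equivalent to the old single-line conjunction; the one substantive change is that the version_gte_3_3_or_1_3 gate is dropped, consistent with the removal of pre-3.3 version checks elsewhere in this commit. The same condition as a single folded expression, for comparison:

  when: >-
    (openshift_hosted_manage_registry | default(true) | bool)
    and not (openshift.docker.hosted_registry_insecure | default(false) | bool)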
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
index ec6f2c52c..8a60a30b8 100644
--- a/playbooks/common/openshift-cluster/create_persistent_volumes.yml
+++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
@@ -1,13 +1,4 @@
---
-- name: Create persistent volumes
- hosts: oo_first_master
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- tasks:
- - debug: var=persistent_volumes
- - debug: var=persistent_volume_claims
-
- name: Create Hosted Resources - persistent volumes
hosts: oo_first_master
vars:
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index be14b06f0..f91361b67 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -1,13 +1,8 @@
---
-- include: evaluate_groups.yml
-
- name: Load openshift_facts
hosts: oo_masters_to_config:oo_nodes_to_config
roles:
- openshift_facts
- post_tasks:
- - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1"
- when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool
- name: Reconfigure masters to listen on our new dns_port
hosts: oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
deleted file mode 100644
index 78b552279..000000000
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ /dev/null
@@ -1,190 +0,0 @@
----
-- name: Populate config host groups
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required
- fail:
- msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set
- when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined
-
- - name: Evaluate groups - g_master_hosts or g_new_master_hosts required
- fail:
- msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: g_master_hosts is not defined and g_new_master_hosts is not defined
-
- - name: Evaluate groups - g_node_hosts or g_new_node_hosts required
- fail:
- msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: g_node_hosts is not defined and g_new_node_hosts is not defined
-
- - name: Evaluate groups - g_lb_hosts required
- fail:
- msg: This playbook requires g_lb_hosts to be set
- when: g_lb_hosts is not defined
-
- - name: Evaluate groups - g_nfs_hosts required
- fail:
- msg: This playbook requires g_nfs_hosts to be set
- when: g_nfs_hosts is not defined
-
- - name: Evaluate groups - g_nfs_hosts is single host
- fail:
- msg: The nfs group must be limited to one host
- when: g_nfs_hosts | default([]) | length > 1
-
- - name: Evaluate groups - g_glusterfs_hosts required
- fail:
- msg: This playbook requires g_glusterfs_hosts to be set
- when: g_glusterfs_hosts is not defined
-
- - name: Evaluate groups - Fail if no etcd hosts group is defined
- fail:
- msg: >
- Running etcd as an embedded service is no longer supported. If this is a
- new install please define an 'etcd' group with either one or three
- hosts. These hosts may be the same hosts as your masters. If this is an
- upgrade you may set openshift_master_unsupported_embedded_etcd=true
- until a migration playbook becomes available.
- when:
- - g_etcd_hosts | default([]) | length not in [3,1]
- - not openshift_master_unsupported_embedded_etcd | default(False)
- - not (openshift_node_bootstrap | default(False))
-
- - name: Evaluate oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: oo_all_hosts
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_masters
- add_host:
- name: "{{ item }}"
- groups: oo_masters
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ g_master_hosts[0] }}"
- groups: oo_first_master
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- when: g_master_hosts|length > 0
- changed_when: no
-
- - name: Evaluate oo_new_etcd_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_new_etcd_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_new_etcd_hosts | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
- changed_when: no
-
- - name: Evaluate oo_etcd_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_etcd_hosts | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_first_etcd
- add_host:
- name: "{{ g_etcd_hosts[0] }}"
- groups: oo_first_etcd
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- when: g_etcd_hosts|length > 0
- changed_when: no
-
- # We use two groups one for hosts we're upgrading which doesn't include embedded etcd
- # The other for backing up which includes the embedded etcd host, there's no need to
- # upgrade embedded etcd that just happens when the master is updated.
- - name: Evaluate oo_etcd_hosts_to_upgrade
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_hosts_to_upgrade
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
- changed_when: False
-
- - name: Evaluate oo_etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
- changed_when: False
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
- changed_when: no
-
- # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
- - name: Add master to oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_master_hosts | default([]) }}"
- when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
- changed_when: no
-
- - name: Evaluate oo_lb_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_lb_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_lb_hosts | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_nfs_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nfs_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_nfs_hosts | default([]) }}"
- changed_when: no
-
- - name: Evaluate oo_glusterfs_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_glusterfs_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
- changed_when: no
-
- - name: Evaluate oo_etcd_to_migrate
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_migrate
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
- changed_when: no
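
One subtlety worth noting as this file moves (the include rewrites later in this diff point at playbooks/init/evaluate_groups.yml): the two-argument form default(fallback, true) in Jinja2 applies the fallback when the variable is empty or falsy, not only when it is undefined. A minimal sketch of how that drives the masters-to-configure selection, with hypothetical host values in the comments:

- name: Evaluate oo_masters_to_config
  add_host:
    name: "{{ item }}"
    groups: oo_masters_to_config
  # g_new_master_hosts undefined or []   -> falls back to g_master_hosts
  # g_new_master_hosts: [m2.example.com] -> only m2.example.com is (re)configured
  with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
  changed_when: no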
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
deleted file mode 100644
index 91223d368..000000000
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ /dev/null
@@ -1,169 +0,0 @@
----
-- name: Ensure that all non-node hosts are accessible
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config
- any_errors_fatal: true
- tasks:
-
-- name: Initialize host facts
- hosts: oo_all_hosts
- tasks:
- - name: load openshift_facts module
- include_role:
- name: openshift_facts
- static: yes
-
- # TODO: Should this role be refactored into health_checks??
- - name: Run openshift_sanitize_inventory to set variables
- include_role:
- name: openshift_sanitize_inventory
-
- - name: Detecting Operating System from ostree_booted
- stat:
- path: /run/ostree-booted
- register: ostree_booted
-
- # Locally setup containerized facts for now
- - name: initialize_facts set fact l_is_atomic
- set_fact:
- l_is_atomic: "{{ ostree_booted.stat.exists }}"
-
- - name: initialize_facts set fact for containerized and l_is_*_system_container
- set_fact:
- l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
- l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
- l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
- l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
- l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
-
- - name: initialize_facts set facts for l_any_system_container
- set_fact:
- l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
-
- - name: initialize_facts set fact for l_etcd_runtime
- set_fact:
- l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- - name: Validate python version - ans_dist is fedora and python is v3
- fail:
- msg: |
- openshift-ansible requires Python 3 for {{ ansible_distribution }};
- For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
- when:
- - ansible_distribution == 'Fedora'
- - ansible_python['version']['major'] != 3
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- - name: Validate python version - ans_dist not Fedora and python must be v2
- fail:
- msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
- when:
- - ansible_distribution != 'Fedora'
- - ansible_python['version']['major'] != 2
-
- # TODO: Should this be moved into health checks??
- # Seems as though any check that happens with a corresponding fail should move into health_checks
- # Fail as early as possible if Atomic and old version of Docker
- - when:
- - l_is_atomic | bool
- block:
-
- # See https://access.redhat.com/articles/2317361
- # and https://github.com/ansible/ansible/issues/15892
- # NOTE: the "'s can not be removed at this level else the docker command will fail
- # NOTE: When ansible >2.2.1.x is used this can be updated per
- # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
- - name: Determine Atomic Host Docker Version
- shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
- register: l_atomic_docker_version
-
- - name: assert atomic host docker version is 1.12 or later
- assert:
- that:
- - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
- msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
-
- - when:
- - not l_is_atomic | bool
- block:
- - name: Ensure openshift-ansible installer package deps are installed
- package:
- name: "{{ item }}"
- state: present
- with_items:
- - iproute
- - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- - yum-utils
-
- - name: Ensure various deps for running system containers are installed
- package:
- name: "{{ item }}"
- state: present
- with_items:
- - atomic
- - ostree
- - runc
- when:
- - l_any_system_container | bool
-
- - name: Default system_images_registry to a enterprise registry
- set_fact:
- system_images_registry: "registry.access.redhat.com"
- when:
- - system_images_registry is not defined
- - openshift_deployment_type == "openshift-enterprise"
-
- - name: Default system_images_registry to community registry
- set_fact:
- system_images_registry: "docker.io"
- when:
- - system_images_registry is not defined
- - openshift_deployment_type == "origin"
-
- - name: Gather Cluster facts and set is_containerized if needed
- openshift_facts:
- role: common
- local_facts:
- deployment_type: "{{ openshift_deployment_type }}"
- deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
- cli_image: "{{ osm_image | default(None) }}"
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- is_containerized: "{{ l_is_containerized | default(None) }}"
- is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
- is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
- is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
- is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
- etcd_runtime: "{{ l_etcd_runtime }}"
- system_images_registry: "{{ system_images_registry }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
- http_proxy: "{{ openshift_http_proxy | default(None) }}"
- https_proxy: "{{ openshift_https_proxy | default(None) }}"
- no_proxy: "{{ openshift_no_proxy | default(None) }}"
- generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
-
- - name: Set fact of no_proxy_internal_hostnames
- openshift_facts:
- role: common
- local_facts:
- no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
- - name: initialize_facts set_fact repoquery command
- set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
- - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
- set_fact:
- openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
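
The Atomic Host block in the file deleted above contains a trick that is easy to miss: docker's --format flag takes Go-template syntax, whose {{ ... }} braces would otherwise be consumed by Ansible's Jinja2 templating, so the task assembles the opening braces through a shell variable. A standalone sketch of the same check, using the task names from the deleted file:

- name: Determine Atomic Host Docker Version
  # $CURLY{json .Server.Version}} expands to {{json .Server.Version}} at runtime,
  # after Jinja2 has already passed over the string without touching it
  shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
  register: l_atomic_docker_version

- name: assert atomic host docker version is 1.12 or later
  assert:
    that:
    - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12', '>=')
    msg: Installation on Atomic Host requires Docker 1.12 or later.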
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
deleted file mode 100644
index a7114fc80..000000000
--- a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Setup yum repositories for all hosts
- hosts: oo_all_hosts
- gather_facts: no
- tasks:
- - name: initialize openshift repos
- include_role:
- name: openshift_repos
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
deleted file mode 100644
index 37a5284d5..000000000
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-# NOTE: requires openshift_facts be run
-- name: Determine openshift_version to configure on first master
- hosts: oo_first_master
- roles:
- - openshift_version
-
-# NOTE: We set this even on etcd hosts as they may also later run as masters,
-# and we don't want to install wrong version of docker and have to downgrade
-# later.
-- name: Set openshift_version for etcd, node, and master hosts
- hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
- vars:
- openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
- pre_tasks:
- - set_fact:
- openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
- - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
- roles:
- - openshift_version
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 281ccce2e..15ee60dc0 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,13 +1,15 @@
---
- name: Hosted Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Hosted install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_hosted: "In Progress"
- aggregate: false
+ installer_phase_hosted:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- include: create_persistent_volumes.yml
@@ -30,11 +32,13 @@
- openshift_crio_enable_docker_gc | default(False) | bool
- name: Hosted Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Hosted install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_hosted: "Complete"
- aggregate: false
+ installer_phase_hosted:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 529a4c939..bc59bd95a 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,13 +1,15 @@
---
- name: Logging Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Logging install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_logging: "In Progress"
- aggregate: false
+ installer_phase_logging:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: OpenShift Aggregated Logging
hosts: oo_first_master
@@ -23,11 +25,13 @@
tasks_from: update_master_config
- name: Logging Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Logging install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_logging: "Complete"
- aggregate: false
+ installer_phase_logging:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index 9c0bd489b..80cd93e5f 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,13 +1,15 @@
---
- name: Metrics Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Metrics install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_metrics: "In Progress"
- aggregate: false
+ installer_phase_metrics:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: OpenShift Metrics
hosts: oo_first_master
@@ -24,11 +26,13 @@
tasks_from: update_master_config.yaml
- name: Metrics Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Metrics install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_metrics: "Complete"
- aggregate: false
+ installer_phase_metrics:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
index a73b294a5..7aa9a16e6 100644
--- a/playbooks/common/openshift-cluster/openshift_prometheus.yml
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -1,13 +1,15 @@
---
- name: Prometheus Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Prometheus install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_prometheus: "In Progress"
- aggregate: false
+ installer_phase_prometheus:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Create Hosted Resources - openshift_prometheus
hosts: oo_first_master
@@ -15,11 +17,13 @@
- role: openshift_prometheus
- name: Prometheus Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Prometheus install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_prometheus: "Complete"
- aggregate: false
+ installer_phase_prometheus:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 2068ed199..eb225dfb5 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -1,11 +1,4 @@
---
-- name: Verify OpenShift version is greater than or equal to 1.2 or 3.2
- hosts: oo_first_master
- tasks:
- - fail:
- msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles."
- when: not openshift.common.version_gte_3_2_or_1_2 | bool
-
- name: Check cert expirys
hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config
vars:
@@ -43,11 +36,6 @@
when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt'
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: servingInfo.clientCA
- yaml_value: ca.crt
- when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
- - modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: etcdClientInfo.ca
yaml_value: ca-bundle.crt
when:
@@ -67,6 +55,13 @@
when:
- groups.oo_etcd_to_config | default([]) | length == 0
- (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
+ # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate.
+ # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml
+ - modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: client-ca-bundle.crt
+ when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'client-ca-bundle.crt'
- name: Copy current OpenShift CA to legacy directory
hosts: oo_masters_to_config
@@ -155,6 +150,7 @@
- ca.key
- ca-bundle.crt
- ca.serial.txt
+ - client-ca-bundle.crt
delegate_to: "{{ openshift_ca_host }}"
run_once: true
changed_when: false
@@ -173,6 +169,7 @@
- ca.key
- ca-bundle.crt
- ca.serial.txt
+ - client-ca-bundle.crt
- name: Update master client kubeconfig CA data
kubeclient_ca:
client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index afd5463b2..7e9363c5f 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -70,9 +70,7 @@
--hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift.common.config_base }}/master/registry.crt
--key={{ openshift.common.config_base }}/master/registry.key
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
--expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }}
- {% endif %}
- name: Update registry certificates secret
oc_secret:
diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml
deleted file mode 100644
index 26716a92d..000000000
--- a/playbooks/common/openshift-cluster/sanity_checks.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-- name: Verify Requirements
- hosts: oo_all_hosts
- tasks:
- - fail:
- msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
-
- - fail:
- msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Nuage sdn can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Contiv can not be used with nuage
- when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
-
- - fail:
- msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
- when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
-
- - fail:
- msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
-
- - fail:
- msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
-
- - fail:
- msg: openshift_hostname must be 63 characters or less
- when: openshift_hostname is defined and openshift_hostname | length > 63
-
- - fail:
- msg: openshift_public_hostname must be 63 characters or less
- when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
index bd964b2ce..7bb8511f6 100644
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -1,13 +1,15 @@
---
- name: Service Catalog Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Service Catalog install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_servicecatalog: "In Progress"
- aggregate: false
+ installer_phase_servicecatalog:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Service Catalog
hosts: oo_first_master
@@ -19,11 +21,13 @@
first_master: "{{ groups.oo_first_master[0] }}"
- name: Service Catalog Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Service Catalog install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_servicecatalog: "Complete"
- aggregate: false
+ installer_phase_servicecatalog:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
deleted file mode 100644
index 45b34c8bd..000000000
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Initialization Checkpoint Start
- hosts: oo_all_hosts
- gather_facts: false
- roles:
- - installer_checkpoint
- tasks:
- - name: Set install initialization 'In Progress'
- set_stats:
- data:
- installer_phase_initialize: "In Progress"
- aggregate: false
-
-- include: evaluate_groups.yml
- tags:
- - always
-
-- include: initialize_facts.yml
- tags:
- - always
-
-- include: sanity_checks.yml
- tags:
- - always
-
-- include: validate_hostnames.yml
- tags:
- - node
-
-- include: initialize_openshift_repos.yml
- tags:
- - always
-
-- include: initialize_openshift_version.yml
- tags:
- - always
-
-- name: Initialization Checkpoint End
- hosts: oo_all_hosts
- gather_facts: false
- tasks:
- - name: Set install initialization 'Complete'
- set_stats:
- data:
- installer_phase_initialize: "Complete"
- aggregate: false
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 98953f72e..6d4ddf011 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,5 +1,5 @@
---
-- include: ../../evaluate_groups.yml
+- include: ../../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 2826951e6..9981d905b 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,15 +1,20 @@
---
-- include: ../evaluate_groups.yml
+- include: ../../../init/evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
-- include: ../initialize_facts.yml
+- include: ../../../init/facts.yml
- name: Ensure firewall is not switched during upgrade
hosts: oo_all_hosts
+ vars:
+ openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
tasks:
+ - name: set currently installed version
+ set_fact:
+ openshift_currently_installed_version: "{{ openshift_master_installed_version }}"
- name: Check if iptables is running
command: systemctl status iptables
changed_when: false
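
The new tasks above snapshot the cluster's running version before any packages move: every host in oo_all_hosts reaches into hostvars for the first master's already-gathered openshift.common.version and freezes it as a per-host fact. A minimal sketch of that reach-across, assuming oo_first_master has been populated by evaluate_groups.yml:

- hosts: oo_all_hosts
  tasks:
  - name: set currently installed version
    set_fact:
      openshift_currently_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"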
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 122066955..c634e0ab8 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -85,6 +85,8 @@
roles:
- openshift_manageiq
+ - role: openshift_project_request_template
+ when: openshift_project_request_template_manage
# Create the new templates shipped in 3.2, existing templates are left
# unmodified. This prevents the subsequent role definition for
# openshift_examples from failing when trying to replace templates that do
@@ -117,7 +119,6 @@
- name: grep pluginOrderOverride
command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
register: grep_plugin_order_override
- when: openshift.common.version_gte_3_3_or_1_3 | bool
changed_when: false
failed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
deleted file mode 100644
index 8cc46ab68..000000000
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-# When we update package "a-${version}" and a requires b >= ${version} if we
-# don't specify the version of b yum will choose the latest version of b
-# available and the whole set of dependencies end up at the latest version.
-# Since the package module, unlike the yum module, doesn't flatten a list
-# of packages into one transaction we need to do that explicitly. The ansible
-# core team tells us not to rely on yum module transaction flattening anyway.
-
-# TODO: If the sdn package isn't already installed this will install it, we
-# should fix that
-- name: Upgrade master packages
- package: name={{ master_pkgs | join(',') }} state=present
- vars:
- master_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - PyYAML
- when:
- - component == "master"
- - not openshift.common.is_atomic | bool
-
-- name: Upgrade node packages
- package: name={{ node_pkgs | join(',') }} state=present
- vars:
- node_pkgs:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - PyYAML
- when:
- - component == "node"
- - not openshift.common.is_atomic | bool
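
The header comment in the file deleted above is the important part: the package module runs one transaction per list element, so pinning only the package being upgraded can let yum float its unpinned dependencies to whatever is latest. Joining the pinned names into one comma-separated name string forces a single transaction. A trimmed sketch using names from the deleted node task (openshift_pkg_version carries a leading dash, e.g. "-3.7.0", per the version-initialization play removed earlier in this diff):

- name: Upgrade node packages in a single transaction
  package:
    name: "{{ node_pkgs | join(',') }}"
    state: present
  vars:
    node_pkgs:
    - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
    - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
  when: not openshift.common.is_atomic | bool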
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index a5e2f7940..399b818a7 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -3,22 +3,6 @@
# Upgrade Masters
###############################################################################
-# oc adm migrate storage should be run prior to etcd v3 upgrade
-# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
-- name: Pre master upgrade - Upgrade all storage
- hosts: oo_first_master
- tasks:
- - name: Upgrade all storage
- command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=* --confirm
- register: l_pb_upgrade_control_plane_pre_upgrade_storage
- when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
- failed_when:
- - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
- - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
- - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
-
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
# so we must first make sure this is set correctly before attempting the backup.
@@ -48,6 +32,22 @@
- include: create_service_signer_cert.yml
+# oc adm migrate storage should be run prior to etcd v3 upgrade
+# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
+- name: Pre master upgrade - Upgrade all storage
+ hosts: oo_first_master
+ tasks:
+ - name: Upgrade all storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ register: l_pb_upgrade_control_plane_pre_upgrade_storage
+ when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
+ - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
+
# Set openshift_master_facts separately. In order to reconcile
# admission_config's, we currently must run openshift_master_facts and
# then run openshift_facts.
@@ -63,13 +63,9 @@
vars:
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
- handlers:
- - include: ../../../../roles/openshift_master/handlers/main.yml
- static: yes
- roles:
- - openshift_facts
- - lib_utils
- post_tasks:
+ tasks:
+ - include_role:
+ name: openshift_facts
# Run the pre-upgrade hook if defined:
- debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
@@ -78,55 +74,9 @@
- include: "{{ openshift_master_upgrade_pre_hook }}"
when: openshift_master_upgrade_pre_hook is defined
- - include: rpm_upgrade.yml component=master
- when: not openshift.common.is_containerized | bool
-
- - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
-
- - include: upgrade_scheduler.yml
-
- - include: "{{ master_config_hook }}"
- when: master_config_hook is defined
-
- - include_vars: ../../../../roles/openshift_master/vars/main.yml
-
- - name: Update journald config
- include: ../../../../roles/openshift_master/tasks/journald.yml
-
- - name: Remove any legacy systemd units and update systemd units
- include: ../../../../roles/openshift_master/tasks/systemd_units.yml
-
- - name: Check for ca-bundle.crt
- stat:
- path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- register: ca_bundle_stat
- failed_when: false
-
- - name: Check for ca.crt
- stat:
- path: "{{ openshift.common.config_base }}/master/ca.crt"
- register: ca_crt_stat
- failed_when: false
-
- - name: Migrate ca.crt to ca-bundle.crt
- command: mv ca.crt ca-bundle.crt
- args:
- chdir: "{{ openshift.common.config_base }}/master"
- when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-
- - name: Link ca.crt to ca-bundle.crt
- file:
- src: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- path: "{{ openshift.common.config_base }}/master/ca.crt"
- state: link
- when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-
- - name: Update oreg value
- yedit:
- src: "{{ openshift.common.config_base }}/master/master-config.yaml"
- key: 'imageConfig.format'
- value: "{{ oreg_url | default(oreg_url_master) }}"
- when: oreg_url is defined or oreg_url_master is defined
+ - include_role:
+ name: openshift_master
+ tasks_from: upgrade.yml
# Run the upgrade hook prior to restarting services/system if defined:
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
@@ -153,7 +103,9 @@
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=clusterpolicies --confirm
register: l_pb_upgrade_control_plane_post_upgrade_storage
- when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ - openshift_version | version_compare('3.7','<')
failed_when:
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
@@ -234,7 +186,6 @@
- reconcile_jenkins_role_binding_result.rc == 0
when:
- openshift_version | version_compare('3.7','<')
- - openshift_version | version_compare('3.4','>=')
- when: openshift_upgrade_target | version_compare('3.7','<')
block:
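
version_compare, used in the new guards above, compares version strings semantically rather than lexically, so the post-upgrade clusterpolicies migration and the Jenkins role-binding reconciliation now run only on clusters still below 3.7. A minimal standalone use, with a hypothetical message:

- debug:
    msg: "cluster is pre-3.7; legacy migration steps still apply"
  when: openshift_version | version_compare('3.7', '<')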
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
deleted file mode 100644
index 8558bf3e9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ /dev/null
@@ -1,173 +0,0 @@
----
-# Upgrade predicates
-- vars:
- prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
- prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
- default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
- # older_predicates are the set of predicates that have previously been
- # hard-coded into openshift_facts
- older_predicates:
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: MaxEBSVolumeCount
- - name: MaxGCEPDVolumeCount
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: Region
- argument:
- serviceAffinity:
- labels:
- - region
- # older_predicates_no_region are the set of predicates that have previously
- # been hard-coded into openshift_facts, with the Region predicate removed
- older_predicates_no_region:
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - name: MaxEBSVolumeCount
- - name: MaxGCEPDVolumeCount
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- - name: NoVolumeZoneConflict
- - - name: MatchNodeSelector
- - name: PodFitsResources
- - name: PodFitsPorts
- - name: NoDiskConflict
- block:
-
- # Handle case where openshift_master_predicates is defined
- - block:
- - debug:
- msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
- when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
-
- - debug:
- msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
- when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
- when: openshift_master_scheduler_predicates | default(none) is not none
-
- # Handle cases where openshift_master_predicates is not defined
- - block:
- - debug:
- msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
- when:
- - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
- - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]
-
- - set_fact:
- openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
- when:
- - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
- - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]
-
- - set_fact:
- openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
- when:
- - openshift_master_scheduler_current_predicates != default_predicates_no_region
- - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
-
- when: openshift_master_scheduler_predicates | default(none) is none
-
-
-# Upgrade priorities
-- vars:
- prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
- prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}"
- default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}"
- # older_priorities are the set of priorities that have previously been
- # hard-coded into openshift_facts
- older_priorities:
- - - name: LeastRequestedPriority
- weight: 1
- - name: SelectorSpreadPriority
- weight: 1
- - name: Zone
- weight: 2
- argument:
- serviceAntiAffinity:
- label: zone
- # older_priorities_no_region are the set of priorities that have previously
- # been hard-coded into openshift_facts, with the Zone priority removed
- older_priorities_no_zone:
- - - name: LeastRequestedPriority
- weight: 1
- - name: SelectorSpreadPriority
- weight: 1
- block:
-
- # Handle case where openshift_master_priorities is defined
- - block:
- - debug:
- msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
- when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
-
- - debug:
- msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
- when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
- when: openshift_master_scheduler_priorities | default(none) is not none
-
- # Handle cases where openshift_master_priorities is not defined
- - block:
- - debug:
- msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
- when:
- - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
- - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]
-
- - set_fact:
- openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
- when:
- - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
- - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]
-
- - set_fact:
- openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
- when:
- - openshift_master_scheduler_current_priorities != default_priorities_no_zone
- - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]
-
- when: openshift_master_scheduler_priorities | default(none) is none
-
-
-# Update scheduler
-- vars:
- scheduler_config:
- kind: Policy
- apiVersion: v1
- predicates: "{{ openshift_upgrade_scheduler_predicates
- | default(openshift_master_scheduler_current_predicates) }}"
- priorities: "{{ openshift_upgrade_scheduler_priorities
- | default(openshift_master_scheduler_current_priorities) }}"
- block:
- - name: Update scheduler config
- copy:
- content: "{{ scheduler_config | to_nice_json }}"
- dest: "{{ openshift_master_scheduler_conf }}"
- backup: true
- when: >
- openshift_upgrade_scheduler_predicates is defined or
- openshift_upgrade_scheduler_priorities is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
deleted file mode 100644
index 5e7a66171..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst'
- yaml_value: 400
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.qps'
- yaml_value: 200
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst'
- yaml_value: 600
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps'
- yaml_value: 300
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
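Note: the modify_yaml module used throughout these config hooks ships in this repo's library/ directory; it rewrites one nested key in place, and an empty yaml_value (as in the pluginOrderOverride task above) clears the key. A minimal standalone sketch, assuming the bundled module is on the module path (path and value are illustrative):

    - hosts: masters
      tasks:
        - name: Set a nested key in a YAML config (sketch)
          modify_yaml:
            dest: /etc/origin/master/master-config.yaml
            yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps'
            yaml_value: 300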
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
deleted file mode 100644
index 89b524f14..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.acceptContentTypes'
- yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.contentType'
- yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.burst'
- yaml_value: 40
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
- yaml_key: 'masterClientConnectionOverrides.qps'
- yaml_value: 20
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
deleted file mode 100644
index a241ef039..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
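Note: the oo_select_keys/oo_collect chain above comes from this repo's filter plugins: it selects the hostvars of every master, node, and etcd host, collects each host's openshift.common.hostname fact, and joins them into one comma-separated no-proxy list. A sketch of the effect with illustrative hostnames:

    # Given three hosts whose openshift.common.hostname facts are:
    #   master1.example.com, node1.example.com, etcd1.example.com
    # the set_fact above yields:
    #   openshift_no_proxy_internal_hostnames: "master1.example.com,node1.example.com,etcd1.example.com"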
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
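Note: the master_config_hook and node_config_hook variables are consumed inside the shared upgrade playbooks, which include the hook file only when the variable is set. A hedged sketch of that dispatch (the exact include position lives in ../upgrade_control_plane.yml):

    - include: "{{ master_config_hook }}"
      when: master_config_hook is defined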
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
deleted file mode 100644
index 54c85f0fb..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
deleted file mode 100644
index cee4e9087..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ /dev/null
@@ -1,113 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
deleted file mode 100644
index 52458e03c..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
deleted file mode 100644
index ae217ba2e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ /dev/null
@@ -1,116 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../upgrade_nodes.yml
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
deleted file mode 100644
index d7cb38d03..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
deleted file mode 100644
index 52458e03c..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
deleted file mode 100644
index e29d0f8e6..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ /dev/null
@@ -1,111 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- include: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- include: ../../initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../cleanup_unused_images.yml
-
-- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
deleted file mode 100644
index ae63c9ca9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-###############################################################################
-# Pre-upgrade checks for known data problems. If this playbook fails, you should
-# contact support. If you're not supported, contact users@lists.openshift.com
-#
-# oc_objectvalidator provides these two checks
-# 1 - SDN Data issues, never seen in the wild but known possible due to code audits
-# https://github.com/openshift/origin/issues/12697
-# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934
-#
-###############################################################################
-- name: Verify 3.5 specific upgrade checks
- hosts: oo_first_master
- roles:
- - { role: lib_openshift }
- tasks:
- - name: Check for invalid namespaces and SDN errors
- oc_objectvalidator:
-
- # What's all this PetSet business about?
- #
- # 'PetSets' were ALPHA resources in Kube 1.4 (OpenShift <= 3.4). In Kube
- # 1.5 (OpenShift >= 3.5) they are no longer supported. The BETA resource
- # 'StatefulSets' replaces them. We can't migrate clients' PetSets to
- # StatefulSets. Additionally, Red Hat has never officially supported
- # these resource types. Sorry users, but if you were using
- # unsupported resources from the Kube documentation then we can't
- # help you at this time.
- #
- # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229
- - name: Check if legacy PetSets exist
- oc_obj:
- state: list
- all_namespaces: true
- kind: petsets
- register: l_do_petsets_exist
-
- - name: Fail on unsupported resource migration 'PetSets'
- fail:
- msg: >
- PetSet objects were detected in your cluster. These are an
- Alpha feature in upstream Kubernetes 1.4 and are not supported
- by Red Hat. In Kubernetes 1.5, they are replaced by the Beta
- feature StatefulSets. Red Hat currently does not offer support
- for either PetSets or StatefulSets.
-
- Automatically migrating PetSets to StatefulSets in OpenShift
- Container Platform (OCP) 3.5 is not supported. See the
- Kubernetes "Upgrading from PetSets to StatefulSets"
- documentation for additional information:
-
- https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/
-
- PetSets MUST be removed before upgrading to OCP 3.5. Red Hat
- strongly recommends reading the above referenced documentation
- in its entirety before taking any destructive actions.
-
- If you want to simply remove all PetSets without manually
- migrating to StatefulSets, run this command as a user with
- cluster-admin privileges:
-
- $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false
- when:
- # Search did not fail, valid resource type found
- - l_do_petsets_exist.results.returncode == 0
- # Items do exist in the search results
- - l_do_petsets_exist.results.results.0['items'] | length > 0
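Note: the when clause above walks a nested register; a sketch of the oc_obj list result's shape (values illustrative, abridged):

    # l_do_petsets_exist:
    #   results:
    #     returncode: 0          # search succeeded, resource type exists
    #     results:
    #       - apiVersion: v1
    #         kind: List
    #         items: []          # non-empty when legacy PetSets remain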
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
deleted file mode 100644
index db0c8f886..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: servingInfo.clientCA
- yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index dd109cfa9..ef52f214b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -63,7 +63,7 @@
tags:
- pre_upgrade
-- include: ../../initialize_openshift_version.yml
+- include: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 8ab68002d..4c6646a38 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -67,7 +67,7 @@
tags:
- pre_upgrade
-- include: ../../initialize_openshift_version.yml
+- include: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index ba6fcc3f8..f25cfe0d0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -56,7 +56,7 @@
tags:
- pre_upgrade
-- include: ../../initialize_openshift_version.yml
+- include: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index f4862e321..e3c012380 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -67,7 +67,7 @@
tags:
- pre_upgrade
-- include: ../../initialize_openshift_version.yml
+- include: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index b905d6d86..a88fa7b2e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -71,7 +71,7 @@
tags:
- pre_upgrade
-- include: ../../initialize_openshift_version.yml
+- include: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index bc080f9a3..c0546bd2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -56,7 +56,7 @@
tags:
- pre_upgrade
-- include: ../../initialize_openshift_version.yml
+- include: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 8e4f99c91..74d0cd8ad 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -11,13 +11,15 @@
tasks:
- name: Check for invalid namespaces and SDN errors
oc_objectvalidator:
-
+ # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO
- name: Confirm OpenShift authorization objects are in sync
command: >
{{ openshift.common.client_binary }} adm migrate authorization
- when: openshift_version | version_compare('3.7','<')
+ when:
+ - openshift_currently_installed_version | version_compare('3.7','<')
+ - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool
changed_when: false
register: l_oc_result
until: l_oc_result.rc == 0
- retries: 4
+ retries: 2
delay: 15
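Note: the second new condition exposes an escape hatch; a hedged inventory sketch that skips the authorization migration pre-check (variable name taken from the diff above):

    # group_vars/OSEv3 (sketch)
    openshift_upgrade_pre_authorization_migration_enabled: false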
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins
index 7de3c1dd7..7de3c1dd7 120000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml
index 1d4d1919c..1d4d1919c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/roles b/playbooks/common/openshift-cluster/upgrades/v3_8/roles
index 415645be6..415645be6 120000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/roles
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/roles
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index bda245fe1..73df15d53 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -12,8 +12,8 @@
- pre_upgrade
tasks:
- set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
# Pre-upgrade
@@ -21,6 +21,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
- name: Update repos and initialize facts on all hosts
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
tags:
@@ -47,6 +51,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
@@ -59,7 +67,7 @@
tags:
- pre_upgrade
-- include: ../../initialize_openshift_version.yml
+- include: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -113,7 +121,21 @@
- include: ../upgrade_control_plane.yml
vars:
- master_config_hook: "v3_5/master_config_upgrade.yml"
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
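Note: because Ansible's default linear strategy finishes each task on every host before starting the next, the stop task quiesces controllers on all masters before any start task runs, which is what forces the fresh leader election. A standalone sketch of the same cycle, assuming service_type resolves to 'origin':

    - hosts: oo_masters_to_config
      gather_facts: no
      tasks:
        - systemd:
            name: origin-master-controllers  # assumed service name
            state: stopped
        - systemd:
            name: origin-master-controllers
            state: started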
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 6cdea7b84..48d55c16f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -21,14 +21,18 @@
- pre_upgrade
tasks:
- set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
# Pre-upgrade
- include: ../initialize_nodes_to_upgrade.yml
tags:
- pre_upgrade
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
- name: Update repos on control plane hosts
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tags:
@@ -55,6 +59,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
@@ -63,7 +71,7 @@
tags:
- pre_upgrade
-- include: ../../initialize_openshift_version.yml
+- include: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
@@ -117,6 +125,20 @@
- include: ../upgrade_control_plane.yml
vars:
- master_config_hook: "v3_5/master_config_upgrade.yml"
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time, then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 8531e6045..abd56e762 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -14,8 +14,8 @@
- pre_upgrade
tasks:
- set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_target: '3.8'
+ openshift_upgrade_min: '3.7'
# Pre-upgrade
- include: ../initialize_nodes_to_upgrade.yml
@@ -48,11 +48,15 @@
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_node_excluders.yml
tags:
- pre_upgrade
-- include: ../../initialize_openshift_version.yml
+- include: ../../../../init/version.yml
tags:
- pre_upgrade
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml
new file mode 100644
index 000000000..d8540abfb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.8 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - debug: msg="noop"
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
deleted file mode 100644
index be2e6a15a..000000000
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Validate node hostnames
- hosts: oo_nodes_to_config
- tasks:
- - name: Query DNS for IP address of {{ openshift.common.hostname }}
- shell:
- getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
- register: lookupip
- changed_when: false
- failed_when: false
- - name: Warn user about bad openshift_hostname values
- pause:
- prompt:
- The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
- doesn't resolve to an IP address owned by this host. Please set the
- openshift_hostname variable to a hostname that, when resolved on the host
- in question, resolves to an IP address matching an interface on this
- host. This host will fail liveness checks for pods utilizing hostPorts.
- Press ENTER to continue or CTRL-C to abort.
- seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
- when:
- - lookupip.stdout != '127.0.0.1'
- - lookupip.stdout not in ansible_all_ipv4_addresses
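Note: a hedged non-interactive variant of the same check, failing outright instead of pausing (getent invocation copied from the deleted task):

    - name: Validate node hostnames (sketch)
      hosts: oo_nodes_to_config
      tasks:
        - name: Query DNS for the configured hostname
          shell: getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
          register: lookupip
          changed_when: false
          failed_when: false
        - name: Fail when the hostname does not resolve to a local address
          assert:
            that:
              - lookupip.stdout == '127.0.0.1' or lookupip.stdout in ansible_all_ipv4_addresses
            msg: "{{ openshift.common.hostname }} does not resolve to an address on this host"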
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 48d46bbb0..3fe483785 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -1,13 +1,15 @@
---
- name: etcd Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set etcd install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_etcd: "In Progress"
- aggregate: false
+ installer_phase_etcd:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- include: ca.yml
@@ -26,11 +28,13 @@
- role: nickhammond.logrotate
- name: etcd Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set etcd install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_etcd: "Complete"
- aggregate: false
+ installer_phase_etcd:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
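Note: set_stats data persists across plays and is read at the end of the run by the repo's installer_checkpoint callback, which is why one run_once invocation on any host is enough. A minimal sketch of the pattern with an illustrative phase name:

    - hosts: all
      gather_facts: false
      tasks:
        - name: Mark an install phase started (sketch)
          run_once: true
          set_stats:
            data:
              installer_phase_example:
                status: "In Progress"
                start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"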
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index c2ae5f313..19e14ab3e 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -1,13 +1,15 @@
---
- name: GlusterFS Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set GlusterFS install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_glusterfs: "In Progress"
- aggregate: false
+ installer_phase_glusterfs:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Open firewall ports for GlusterFS nodes
hosts: glusterfs
@@ -46,11 +48,13 @@
when: groups.oo_glusterfs_to_config | default([]) | count > 0
- name: GlusterFS Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set GlusterFS install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_glusterfs: "Complete"
- aggregate: false
+ installer_phase_glusterfs:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index 2a703cb61..d737b836b 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -1,13 +1,15 @@
---
- name: Load Balancer Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set load balancer install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_loadbalancer: "In Progress"
- aggregate: false
+ installer_phase_loadbalancer:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Configure firewall and docker for load balancers
hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
@@ -37,11 +39,13 @@
- role: tuned
- name: Load Balancer Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set load balancer install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_loadbalancer: "Complete"
- aggregate: false
+ installer_phase_loadbalancer:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml
index 908679e81..3f1cdf713 100644
--- a/playbooks/common/openshift-management/config.yml
+++ b/playbooks/common/openshift-management/config.yml
@@ -1,13 +1,15 @@
---
- name: Management Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Management install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_management: "In Progress"
- aggregate: false
+ installer_phase_management:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Setup CFME
hosts: oo_first_master
@@ -25,11 +27,13 @@
template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
- name: Management Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Management install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_management: "Complete"
- aggregate: false
+ installer_phase_management:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index 350557f19..32f638d42 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -1,13 +1,15 @@
---
- name: Master Additional Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Master Additional install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_master_additional: "In Progress"
- aggregate: false
+ installer_phase_master_additional:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Additional master configuration
hosts: oo_first_master
@@ -19,6 +21,8 @@
roles:
- role: openshift_master_cluster
when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
+ - role: openshift_project_request_template
+ when: openshift_project_request_template_manage
- role: openshift_examples
when: openshift_install_examples | default(true, true) | bool
registry_url: "{{ openshift.master.registry_url }}"
@@ -36,11 +40,13 @@
when: openshift_use_flannel | default(false) | bool
- name: Master Additional Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Master Additional install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_master_additional: "Complete"
- aggregate: false
+ installer_phase_master_additional:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index b359919ba..6b0fd6b7c 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,13 +1,15 @@
---
- name: Master Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Master install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_master: "In Progress"
- aggregate: false
+ installer_phase_master:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- include: certificates.yml
@@ -212,6 +214,12 @@
tasks_from: master
when: openshift_use_kuryr | default(false) | bool
+ - name: Setup the node group config maps
+ include_role:
+ name: openshift_node_group
+ when: openshift_master_bootstrap_enabled | default(false) | bool
+ run_once: True
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
@@ -232,11 +240,13 @@
r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Master Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Master install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_master: "Complete"
- aggregate: false
+ installer_phase_master:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
deleted file mode 100644
index d0a9f11dc..000000000
--- a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
+++ /dev/null
@@ -1,2 +0,0 @@
-// empty file so that the master-config can still point to a file that exists
-// this file will be replaced by the template service broker role if enabled
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 4f8b758fd..4e1b3a3be 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -1,22 +1,4 @@
---
-- name: Restart master API
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: restarted
- when: openshift_master_ha | bool
-- name: Wait for master API to come back online
- wait_for:
- host: "{{ openshift.common.hostname }}"
- state: started
- delay: 10
- port: "{{ openshift.master.api_port }}"
- timeout: 600
- when: openshift_master_ha | bool
-- name: Restart master controllers
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: restarted
- # Ignore errors since it is possible that type != simple for
- # pre-3.1.1 installations.
- ignore_errors: true
- when: openshift_master_ha | bool
+- include_role:
+ name: openshift_master
+ tasks_from: restart.yml
diff --git a/playbooks/common/openshift-master/revert-client-ca.yml b/playbooks/common/openshift-master/revert-client-ca.yml
new file mode 100644
index 000000000..9ae23bf5b
--- /dev/null
+++ b/playbooks/common/openshift-master/revert-client-ca.yml
@@ -0,0 +1,17 @@
+---
+- name: Set servingInfo.clientCA = ca.crt in master config
+ hosts: oo_masters_to_config
+ tasks:
+ - name: Read master config
+ slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_output
+
+ # servingInfo.clientCA may be set as the client-ca-bundle.crt from
+ # CA redeployment and this task reverts that change.
+ - name: Set servingInfo.clientCA = ca.crt in master config
+ modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
+ when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
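Note: the play above is an instance of a read-check-modify idiom: slurp the remote file, parse it in the when clause, and only rewrite the key when it differs. A generic hedged sketch, assuming the repo's modify_yaml module is available (paths and key are illustrative):

    - name: Read a remote YAML config (sketch)
      slurp:
        src: /etc/example/config.yaml
      register: cfg

    - name: Patch the key only when it differs
      modify_yaml:
        dest: /etc/example/config.yaml
        yaml_key: some.nested.key
        yaml_value: desired
      when: (cfg.content | b64decode | from_yaml).some.nested.key != 'desired'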
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index f4dc9df8a..4c415ebce 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -22,16 +22,17 @@
- name: restart master api
service: name={{ openshift.common.service_type }}-master-controllers state=restarted
notify: verify api server
+ # We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
- name: verify api server
command: >
curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
{{ openshift.master.api_url }}/healthz/ready
args:
# Disables the following warning:
diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
index 560eea785..97acc5d5d 100644
--- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml
+++ b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
@@ -136,9 +136,15 @@
when:
- not front_proxy_kubeconfig.stat.exists
-- name: copy tech preview extension file for service console UI
- copy:
- src: openshift-ansible-catalog-console.js
+- name: Delete temp directory
+ file:
+ name: "{{ certtemp.stdout }}"
+ state: absent
+ changed_when: False
+
+- name: Setup extension file for service console UI
+ template:
+ src: ../templates/openshift-ansible-catalog-console.js
dest: /etc/origin/master/openshift-ansible-catalog-console.js
- name: Update master config
@@ -179,8 +185,13 @@
- yedit_output.changed
- openshift.master.cluster_method == 'native'
+# We retry the controllers because the API may not be 100% initialized yet.
- name: restart master controllers
- systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
when:
- yedit_output.changed
- openshift.master.cluster_method == 'native'
@@ -190,11 +201,7 @@
# wait_for port doesn't provide health information.
command: >
curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
{{ openshift.master.api_url }}/healthz/ready
args:
# Disables the following warning:
@@ -207,9 +214,3 @@
changed_when: false
when:
- yedit_output.changed
-
-- name: Delete temp directory
- file:
- name: "{{ certtemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..fd02325ba
--- /dev/null
+++ b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js
@@ -0,0 +1 @@
+window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }};
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index ce672daf5..6ea77e00b 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -1,13 +1,15 @@
---
- name: NFS Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set NFS install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_nfs: "In Progress"
- aggregate: false
+ installer_phase_nfs:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- name: Configure nfs
hosts: oo_nfs_to_config
@@ -16,11 +18,13 @@
- role: openshift_storage_nfs
- name: NFS Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set NFS install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_nfs: "Complete"
- aggregate: false
+ installer_phase_nfs:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 4f8f98aef..28e3c1b1b 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,13 +1,15 @@
---
- name: Node Install Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Node install 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_node: "In Progress"
- aggregate: false
+ installer_phase_node:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- include: certificates.yml
@@ -24,11 +26,13 @@
- include: enable_excluders.yml
- name: Node Install Checkpoint End
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
tasks:
- name: Set Node install 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_node: "Complete"
- aggregate: false
+ installer_phase_node:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
index 30651a1df..3c042acdc 100644
--- a/playbooks/common/openshift-node/image_prep.yml
+++ b/playbooks/common/openshift-node/image_prep.yml
@@ -1,15 +1,12 @@
---
- name: normalize groups
- include: ../../byo/openshift-cluster/initialize_groups.yml
-
-- name: evaluate the groups
- include: ../openshift-cluster/evaluate_groups.yml
+ include: ../../init/evaluate_groups.yml
- name: initialize the facts
- include: ../openshift-cluster/initialize_facts.yml
+ include: ../../init/facts.yml
- name: initialize the repositories
- include: ../openshift-cluster/initialize_openshift_repos.yml
+ include: ../../init/repos.yml
- name: run node config setup
include: setup.yml
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml
index b3a7399dc..c2efb0483 100644
--- a/playbooks/common/openshift-node/network_manager.yml
+++ b/playbooks/common/openshift-node/network_manager.yml
@@ -1,5 +1,5 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
+- include: ../../init/evaluate_groups.yml
- name: Install and configure NetworkManager
hosts: oo_all_hosts