Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-cluster/config.yml                                   20
-rw-r--r--  playbooks/common/openshift-cluster/disable_excluder.yml                         17
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml                         104
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml              5
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml                         4
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml                         2
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/ca.yml                  6
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/router.yml             61
-rw-r--r--  playbooks/common/openshift-cluster/reset_excluder.yml                            8
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml                              24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_excluder.yml                21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml        12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml          12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml                     66
l---------  playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh                1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml                       32
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml                   114
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml                            35
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml              30
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml           29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml      54
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml                     56
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml           11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml                    7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml               16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml       2
l---------  playbooks/common/openshift-cluster/upgrades/v3_3/roles                           1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml                   111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml     111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml             106
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml       2
l---------  playbooks/common/openshift-cluster/upgrades/v3_4/roles                           1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml                   109
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml     111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml             104
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml                   113
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml     115
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml             104
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml                   113
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml     115
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml             104
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml                   4
-rw-r--r--  playbooks/common/openshift-glusterfs/config.yml                                 23
l---------  playbooks/common/openshift-glusterfs/filter_plugins                              1
l---------  playbooks/common/openshift-glusterfs/lookup_plugins                              1
-rw-r--r--  playbooks/common/openshift-glusterfs/registry.yml                               49
l---------  playbooks/common/openshift-glusterfs/roles                                       1
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml                                   18
-rw-r--r--  playbooks/common/openshift-node/network_manager.yml                             26
-rw-r--r--  playbooks/common/openshift-node/scaleup.yml                                     16
51 files changed, 1841 insertions(+), 369 deletions(-)
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 3c70db6a9..1482b3a3f 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -3,9 +3,15 @@
tags:
- always
-- include: disable_excluder.yml
+- name: Disable excluders
+ hosts: oo_masters_to_config:oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- include: ../openshift-etcd/config.yml
tags:
@@ -31,10 +37,20 @@
tags:
- node
+- include: ../openshift-glusterfs/config.yml
+ tags:
+ - glusterfs
+
- include: openshift_hosted.yml
tags:
- hosted
-- include: reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config:oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
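The change above replaces the included disable/reset playbooks with inline, parameterized role invocations. A minimal sketch of that invocation style, assuming the openshift_excluder role is on the role path; the hardcoded service type is illustrative (the real plays derive it from openshift.common.service_type):

---
- name: Disable excluders (sketch)
  hosts: localhost
  gather_facts: no
  roles:
    - role: openshift_excluder
      r_openshift_excluder_action: disable
      r_openshift_excluder_service_type: origin  # illustrative; normally "{{ openshift.common.service_type }}"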
diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml
deleted file mode 100644
index f664c51c9..000000000
--- a/playbooks/common/openshift-cluster/disable_excluder.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Disable excluders
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
-
- # During installation the excluders are installed with present state.
- # So no pre-validation check here as the excluders are either to be installed (present = latest)
- # or they are not going to be updated if already installed
-
- # disable excluders based on their status
- - include_role:
- name: openshift_excluder
- tasks_from: disable
- vars:
- openshift_excluder_package_state: present
- docker_excluder_package_state: present
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 45a4875a3..46932b27f 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -5,29 +5,40 @@
become: no
gather_facts: no
tasks:
- - fail:
+ - name: Evaluate groups - g_etcd_hosts required
+ fail:
msg: This playbook requires g_etcd_hosts to be set
- when: "{{ g_etcd_hosts is not defined }}"
+ when: g_etcd_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_master_hosts or g_new_master_hosts required
+ fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: "{{ g_master_hosts is not defined and g_new_master_hosts is not defined }}"
+ when: g_master_hosts is not defined or g_new_master_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_node_hosts or g_new_node_hosts required
+ fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}"
+ when: g_node_hosts is not defined or g_new_node_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_lb_hosts required
+ fail:
msg: This playbook requires g_lb_hosts to be set
- when: "{{ g_lb_hosts is not defined }}"
+ when: g_lb_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts required
+ fail:
msg: This playbook requires g_nfs_hosts to be set
- when: "{{ g_nfs_hosts is not defined }}"
+ when: g_nfs_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts is single host
+ fail:
msg: The nfs group must be limited to one host
- when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}"
+ when: (groups[g_nfs_hosts] | default([])) | length > 1
+
+ - name: Evaluate groups - g_glusterfs_hosts required
+ fail:
+ msg: This playbook requires g_glusterfs_hosts to be set
+ when: g_glusterfs_hosts is not defined
- name: Evaluate oo_all_hosts
add_host:
@@ -47,13 +58,13 @@
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
changed_when: no
- - name: Evaluate oo_etcd_to_config
+ - name: Evaluate oo_first_master
add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
+ name: "{{ g_master_hosts[0] }}"
+ groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_etcd_hosts | default([]) }}"
+ when: g_master_hosts|length > 0
changed_when: no
- name: Evaluate oo_masters_to_config
@@ -65,41 +76,59 @@
with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
changed_when: no
- - name: Evaluate oo_nodes_to_config
+ - name: Evaluate oo_etcd_to_config
add_host:
name: "{{ item }}"
- groups: oo_nodes_to_config
+ groups: oo_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
+ with_items: "{{ g_etcd_hosts | default([]) }}"
changed_when: no
- # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is set.
- - name: Add master to oo_nodes_to_config
+ - name: Evaluate oo_first_etcd
add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
+ name: "{{ g_etcd_hosts[0] }}"
+ groups: oo_first_etcd
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_master_hosts | default([]) }}"
- when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}"
+ when: g_etcd_hosts|length > 0
changed_when: no
- - name: Evaluate oo_first_etcd
+ # We use two groups: one for the hosts we're upgrading, which doesn't include
+ # embedded etcd, and the other for backing up, which includes the embedded etcd
+ # host. There's no need to upgrade embedded etcd; that just happens when the
+ # master is updated.
+ - name: Evaluate oo_etcd_hosts_to_upgrade
add_host:
- name: "{{ g_etcd_hosts[0] }}"
- groups: oo_first_etcd
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_upgrade
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
+ changed_when: False
+
+ - name: Evaluate oo_etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_backup
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+ changed_when: False
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- when: "{{ g_etcd_hosts|length > 0 }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
changed_when: no
- - name: Evaluate oo_first_master
+ # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is set.
+ - name: Add master to oo_nodes_to_config
add_host:
- name: "{{ g_master_hosts[0] }}"
- groups: oo_first_master
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- when: "{{ g_master_hosts|length > 0 }}"
+ with_items: "{{ g_master_hosts | default([]) }}"
+ when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
changed_when: no
- name: Evaluate oo_lb_to_config
@@ -119,3 +148,12 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_nfs_hosts | default([]) }}"
changed_when: no
+
+ - name: Evaluate oo_glusterfs_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_glusterfs_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}"
+ changed_when: no
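All of the group evaluations above rely on the same add_host pattern: a localhost play that builds in-memory groups at runtime. A minimal sketch, with illustrative group and variable names:

---
- name: Evaluate an example group (sketch)
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
    - name: Add each listed host to an in-memory group
      add_host:
        name: "{{ item }}"
        groups: oo_example_to_config  # illustrative group name
      with_items: "{{ g_example_hosts | default([]) }}"  # illustrative variable
      changed_when: no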
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 07b38920f..f4e52869e 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,13 +1,14 @@
---
# NOTE: requires openshift_facts be run
- name: Verify compatible yum/subscription-manager combination
- hosts: l_oo_all_hosts
+ hosts: oo_all_hosts
gather_facts: no
tasks:
# See:
# https://bugzilla.redhat.com/show_bug.cgi?id=1395047
# https://bugzilla.redhat.com/show_bug.cgi?id=1282961
# https://github.com/openshift/openshift-ansible/issues/1138
+ # Consider the repoquery module for this work
- name: Check for bad combinations of yum and subscription-manager
command: >
{{ repoquery_cmd }} --installed --qf '%{version}' "yum"
@@ -16,7 +17,7 @@
when: not openshift.common.is_atomic | bool
- fail:
msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
- when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
+ when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index d96a78c4c..57580406c 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,11 +1,13 @@
---
+- include: evaluate_groups.yml
+
- name: OpenShift Aggregated Logging
hosts: oo_first_master
roles:
- openshift_logging
- name: Update Master configs
- hosts: masters:!oo_first_master
+ hosts: oo_masters:!oo_first_master
tasks:
- block:
- include_role:
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index 9f38ceea6..bcff4a1a1 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,4 +1,6 @@
---
+- include: evaluate_groups.yml
+
- name: OpenShift Metrics
hosts: oo_first_master
roles:
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
index 3b26abcc7..4fa7f9cdf 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
@@ -130,7 +130,7 @@
state: absent
changed_when: false
-- include: ../openshift-etcd/restart.yml
+- include: ../../openshift-etcd/restart.yml
# Update master config when ca-bundle not referenced. Services will be
# restarted below after new CA certificate has been distributed.
@@ -322,7 +322,7 @@
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
-- include: ../openshift-master/restart.yml
+- include: ../../openshift-master/restart.yml
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -371,4 +371,4 @@
state: absent
changed_when: false
-- include: ../openshift-node/restart.yml
+- include: ../../openshift-node/restart.yml
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index a7b614341..9f14f2d69 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -67,7 +67,66 @@
service.alpha.openshift.io/serving-cert-secret-name=router-certs
--config={{ mktemp.stdout }}/admin.kubeconfig
-n default
- when: l_router_dc.rc == 0 and 'router-certs' in router_secrets
+ when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined
+
+ - block:
+ - assert:
+ that:
+ - "'certfile' in openshift_hosted_router_certificate"
+ - "'keyfile' in openshift_hosted_router_certificate"
+ - "'cafile' in openshift_hosted_router_certificate"
+ msg: |-
+ openshift_hosted_router_certificate has been set in the inventory but is
+ missing one or more required keys. Ensure that 'certfile', 'keyfile',
+ and 'cafile' keys have been specified for the openshift_hosted_router_certificate
+ inventory variable.
+
+ - name: Read router certificate and key
+ become: no
+ local_action:
+ module: slurp
+ src: "{{ item }}"
+ register: openshift_router_certificate_output
+ # Defaulting dictionary keys to none to avoid deprecation warnings
+ # (future fatal errors) during template evaluation. Dictionary keys
+ # won't be accessed unless openshift_hosted_router_certificate is
+ # defined and has all keys (certfile, keyfile, cafile) which we
+ # check above.
+ with_items:
+ - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}"
+
+ - name: Write temporary router certificate file
+ copy:
+ content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}"
+ dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ mode: 0600
+
+ - name: Write temporary router key file
+ copy:
+ content: "{{ (openshift_router_certificate_output.results
+ | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}"
+ dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ mode: 0600
+
+ - name: Replace router-certs secret
+ shell: >
+ {{ openshift.common.client_binary }} secrets new router-certs
+ tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ --type=kubernetes.io/tls
+ --confirm
+ -o json | {{ openshift.common.client_binary }} replace -f -
+
+ - name: Remove temporary router certificate and key files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined
- name: Redeploy router
command: >
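The new certificate tasks lean on the slurp module, which returns file contents base64-encoded, so everything read locally must pass through b64decode before being written back out. A minimal sketch with illustrative paths:

---
- name: Read and rewrite a local PEM file (sketch)
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Slurp returns the file content base64-encoded
      slurp:
        src: /tmp/example-cert.pem  # illustrative path
      register: cert_out

    - name: Decode before writing the plain-text copy
      copy:
        content: "{{ cert_out.content | b64decode }}"
        dest: /tmp/example-cert-copy.pem  # illustrative path
        mode: 0600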
diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml
deleted file mode 100644
index eaa8ce39c..000000000
--- a/playbooks/common/openshift-cluster/reset_excluder.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
- - include_role:
- name: openshift_excluder
- tasks_from: enable
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 74cc1d527..6ed31a644 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,28 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: no
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
-
- include: evaluate_groups.yml
tags:
- always
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
deleted file mode 100644
index d1e431c5e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Record excluder state and disable
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
- - include: pre/validate_excluder.yml
- vars:
- #repoquery_cmd: repoquery_cmd
- #openshift_upgrade_target: openshift_upgrade_target
- excluder: "{{ item }}"
- with_items:
- - "{{ openshift.common.service_type }}-docker-excluder"
- - "{{ openshift.common.service_type }}-excluder"
-
- # disable excluders based on their status
- - include_role:
- name: openshift_excluder
- tasks_from: disable
- vars:
- openshift_excluder_package_state: latest
- docker_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
new file mode 100644
index 000000000..800621857
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
@@ -0,0 +1,12 @@
+---
+- name: Disable excluders
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
new file mode 100644
index 000000000..7988e97ab
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -0,0 +1,12 @@
+---
+- name: Disable excluders
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 7ef79afa9..9d0333ca8 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -1,10 +1,10 @@
---
- name: Backup etcd
- hosts: etcd_hosts_to_backup
+ hosts: oo_etcd_hosts_to_backup
vars:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec etcd etcdctl' }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
roles:
- openshift_facts
tasks:
@@ -13,29 +13,20 @@
role: etcd
local_facts: {}
when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+ - set_fact:
+ etcd_backup_dir: "{{ openshift.etcd.etcd_data_dir }}/openshift-backup-{{ backup_tag | default('') }}{{ timestamp }}"
# TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ shell: df --output=avail -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1
register: avail_disk
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
# TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ - name: Check current etcd disk usage
+ shell: du --exclude='*openshift-backup*' -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
register: etcd_disk_usage
when: embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
@@ -52,37 +43,42 @@
# For non containerized and non embedded we should have the correct version of
# etcd installed already. So don't do anything.
#
- # For embedded or containerized we need to use the latest because OCP 3.3 uses
- # a version of etcd that can only be backed up with etcd-3.x and if it's
- # containerized then etcd version may be newer than that on the host so
- # upgrade it.
- #
- # On atomic we have neither yum nor dnf so ansible throws a hard to debug error
- # if you use package there, like this: "Could not find a module for unknown."
- # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668
+ # For containerized installs we now exec into etcd_container
#
- # TODO - We should refactor all containerized backups to use the containerized
- # version of etcd to perform the backup rather than relying on the host's
- # binaries. Until we do that we'll continue to have problems backing up etcd
- # when atomic host has an older version than the version that's running in the
- # container whether that's embedded or not
- - name: Install latest etcd for containerized or embedded
+ # For embedded, non-containerized etcd we need to ensure we have the latest
+ # version of etcd on the host.
+ - name: Install latest etcd for embedded
package:
name: etcd
state: latest
- when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic
+ when:
+ - embedded_etcd | bool
+ - not openshift.common.is_atomic | bool
- name: Generate etcd backup
command: >
{{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}
+ --backup-dir={{ etcd_backup_dir }}
+
+ # According to the docs change, you can simply copy snap/db
+ # https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6
+ - name: Check for v3 data store
+ stat:
+ path: "{{ openshift.etcd.etcd_data_dir }}/member/snap/db"
+ register: v3_db
+
+ - name: Copy etcd v3 data store
+ command: >
+ cp -a {{ openshift.etcd.etcd_data_dir }}/member/snap/db
+ {{ etcd_backup_dir }}/member/snap/
+ when: v3_db.stat.exists
- set_fact:
etcd_backup_complete: True
- name: Display location of etcd backup
debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}"
+ msg: "Etcd backup created in {{ etcd_backup_dir }}"
- name: Gate on etcd backup
hosts: localhost
@@ -91,10 +87,10 @@
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_select_keys(groups.oo_etcd_hosts_to_backup)
| oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- fail:
msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when: etcd_backup_failed | length > 0
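The hunk registers avail_disk and etcd_disk_usage but does not show the comparison task itself. A sketch of how such a gate is typically expressed, assuming the registered stdout values above; the exact threshold used upstream is not shown here:

- name: Abort backup when available space is insufficient (sketch)
  fail:
    msg: "{{ avail_disk.stdout }} KB available; at least {{ etcd_disk_usage.stdout }} KB required"
  when: (avail_disk.stdout | int) < (etcd_disk_usage.stdout | int)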
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
deleted file mode 120000
index 641e04e44..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
+++ /dev/null
@@ -1 +0,0 @@
-../roles/etcd/files/etcdctl.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index fa86d29fb..d9b59edcb 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -5,32 +5,6 @@
# mirrored packages on your own because only the GA and latest versions are
# available in the repos. So for Fedora we'll simply skip this, sorry.
-- include: ../../evaluate_groups.yml
- tags:
- - always
-
-# We use two groups one for hosts we're upgrading which doesn't include embedded etcd
-# The other for backing up which includes the embedded etcd host, there's no need to
-# upgrade embedded etcd that just happens when the master is updated.
-- name: Evaluate additional groups for etcd
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_upgrade
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_upgrade
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
- changed_when: False
-
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
-
- name: Backup etcd before upgrading anything
include: backup.yml
vars:
@@ -38,9 +12,11 @@
when: openshift_etcd_backup | default(true) | bool
- name: Drop etcdctl profiles
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- - include: roles/etcd/tasks/etcdctl.yml
+ - include_role:
+ name: etcd_common
+ tasks_from: etcdctl.yml
- name: Perform etcd upgrade
include: ./upgrade.yml
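The switch from a raw file include to include_role with tasks_from lets the play run a single task file while still resolving the role's own defaults and dependencies. A minimal sketch, assuming the etcd_common role (named in the diff) is on the role path and contains etcdctl.yml:

---
- name: Drop etcdctl profiles (sketch)
  hosts: localhost
  gather_facts: no
  tasks:
    - include_role:
        name: etcd_common
        tasks_from: etcdctl.yml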
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index a9b5b94e6..54f9e21a1 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -1,49 +1,67 @@
---
- name: Determine etcd version
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- - name: Record RPM based etcd version
- command: rpm -qa --qf '%{version}' etcd\*
- args:
- warn: no
- register: etcd_rpm_version
- failed_when: false
+ - block:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ args:
+ warn: no
+ register: etcd_rpm_version
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+
+ - debug:
+ msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
when: not openshift.common.is_containerized | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool and not openshift.common.is_etcd_system_container | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool and openshift.common.is_etcd_system_container | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
+
+ - block:
+ - name: Record containerized etcd version (docker)
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_docker
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - not openshift.common.is_etcd_system_container | bool
+
+ # Given a registered variable is set even if the when condition
+ # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_docker.stdout }}"
+ when:
+ - not openshift.common.is_etcd_system_container | bool
+
+ - name: Record containerized etcd version (runc)
+ command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_runc
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - openshift.common.is_etcd_system_container | bool
+
+ # Given a registered variable is set even if the when condition
+ # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_runc.stdout }}"
+ when:
+ - openshift.common.is_etcd_system_container | bool
+
+ - debug:
+ msg: "Etcd containerized version {{ etcd_container_version }} detected"
+
+ when:
+ - openshift.common.is_containerized | bool
# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop
# through hosts, then loop through tasks only when appropriate
- name: Upgrade to 2.1
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '2.1'
@@ -52,7 +70,7 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade RPM hosts to 2.2
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '2.2'
@@ -61,16 +79,16 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade containerized hosts to 2.2.5
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: 2.2.5
tasks:
- include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
+ when: etcd_container_version | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
- name: Upgrade RPM hosts to 2.3
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '2.3'
@@ -79,16 +97,16 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade containerized hosts to 2.3.7
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: 2.3.7
tasks:
- include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
+ when: etcd_container_version | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
- name: Upgrade RPM hosts to 3.0
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '3.0'
@@ -97,16 +115,16 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade containerized hosts to etcd3 image
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: 3.0.15
tasks:
- include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
+ when: etcd_container_version | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
- name: Upgrade fedora to latest
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- include: fedora_tasks.yml
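Every upgrade play above is gated on the version_compare filter against the recorded etcd version, defaulting to '99' so hosts with no recorded version skip the step. A minimal sketch with an illustrative value:

---
- name: Version gate (sketch)
  hosts: localhost
  gather_facts: no
  vars:
    example_detected_version: "2.1.9"  # illustrative
  tasks:
    - debug:
        msg: "this host would be upgraded to 2.2"
      when: example_detected_version | default('99') | version_compare('2.2', '<')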
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index cc1fa5a0a..0f421928b 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,28 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
-
- include: ../evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
@@ -34,17 +10,6 @@
- include: ../initialize_facts.yml
-- name: Ensure clean repo cache in the event repos have been changed manually
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
- args:
- warn: no
-
- name: Ensure firewall is not switched during upgrade
hosts: oo_all_hosts
tasks:
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index 673f11889..4eac8b067 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
"""Ansible module for modifying OpenShift configs during an upgrade"""
import os
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 0d7cdb227..9b76f1dd0 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -9,6 +9,8 @@
replace ( '${version}', openshift_image_tag ) }}"
router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) |
replace ( '${version}', openshift_image_tag ) }}"
+ registry_console_image: "{{ openshift.master.registry_url | replace ( '${component}', 'registry-console') |
+ replace ( '${version}', openshift.common.short_version ) }}"
pre_tasks:
- name: Load lib_openshift modules
@@ -61,6 +63,26 @@
when:
- _default_registry.results.results[0] != {}
+ - name: Check for registry-console
+ oc_obj:
+ state: list
+ kind: dc
+ name: registry-console
+ register: _registry_console
+ when:
+ - openshift.common.deployment_type != 'origin'
+
+ - name: Update registry-console image to current version
+ oc_edit:
+ kind: dc
+ name: registry-console
+ namespace: default
+ content:
+ spec.template.spec.containers[0].image: "{{ registry_console_image }}"
+ when:
+ - openshift.common.deployment_type != 'origin'
+ - _registry_console.results.results[0] != {}
+
roles:
- openshift_manageiq
# Create the new templates shipped in 3.2, existing templates are left
@@ -97,6 +119,12 @@
- not grep_plugin_order_override | skipped
- grep_plugin_order_override.rc == 0
-- include: ../reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
deleted file mode 100644
index 6de1ed061..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# input variables:
-# - repoquery_cmd
-# - excluder
-# - openshift_upgrade_target
-- block:
- - name: Get available excluder version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}"
- register: excluder_version
- failed_when: false
- changed_when: false
-
- - name: Docker excluder version detected
- debug:
- msg: "{{ excluder }}: {{ excluder_version.stdout }}"
-
- - name: Printing upgrade target version
- debug:
- msg: "{{ openshift_upgrade_target }}"
-
- - name: Check the available {{ excluder }} version is at most of the upgrade target version
- fail:
- msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version"
- when:
- - "{{ excluder_version.stdout != '' }}"
- - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}"
- when:
- - not openshift.common.is_atomic | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index c83923dae..6a9f88707 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -1,21 +1,13 @@
---
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
- vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- pre_tasks:
- - fail:
+
+ tasks:
+ - name: Fail when OpenShift is not installed
+ fail:
msg: Verify OpenShift is already installed
when: openshift.common.version is not defined
- - fail:
- msg: Verify the correct version was found
- when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
-
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- when: not openshift.common.is_containerized | bool
-
- name: Verify containers are available for upgrade
command: >
docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
@@ -23,19 +15,31 @@
changed_when: "'Downloaded newer image' in pull_result.stdout"
when: openshift.common.is_containerized | bool
- - name: Check latest available OpenShift RPM version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
- failed_when: false
- changed_when: false
- register: avail_openshift_version
- when: not openshift.common.is_containerized | bool
+ - when: not openshift.common.is_containerized | bool
+ block:
+ - name: Check latest available OpenShift RPM version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+ failed_when: false
+ changed_when: false
+ register: avail_openshift_version
- - name: Verify OpenShift RPMs are available for upgrade
- fail:
- msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
- when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+ - name: Fail when unable to determine available OpenShift RPM version
+ fail:
+ msg: "Unable to determine available OpenShift RPM version"
+ when:
+ - avail_openshift_version.stdout == ''
- - fail:
+ - name: Verify OpenShift RPMs are available for upgrade
+ fail:
+ msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
+ when:
+ - not avail_openshift_version | skipped
+ - avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+
+ - name: Fail when openshift version does not meet minimum requirement for Origin upgrade
+ fail:
msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
- when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
+ when:
+ - deployment_type == 'origin'
+ - openshift.common.version | version_compare(openshift_upgrade_min,'<')
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index 03ac02e9f..164baca81 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -1,27 +1,39 @@
---
-# We verified latest rpm available is suitable, so just yum update.
+# When we update package "a-${version}" and a requires b >= ${version}, if we
+# don't specify the version of b, yum will choose the latest available version
+# of b and the whole set of dependencies ends up at the latest version.
+# Since the package module, unlike the yum module, doesn't flatten a list
+# of packages into one transaction, we need to do that explicitly. The ansible
+# core team tells us not to rely on yum module transaction flattening anyway.
+
+# TODO: If the sdn package isn't already installed this will install it, we
+# should fix that
-# Master package upgrade ends up depending on node and sdn packages, we need to be explicit
-# with all versions to avoid yum from accidentally jumping to something newer than intended:
- name: Upgrade master packages
- package: name={{ item }} state=present
- when: component == "master"
- with_items:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ package: name={{ master_pkgs | join(',') }} state=present
+ vars:
+ master_pkgs:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - PyYAML
+ when:
+ - component == "master"
+ - not openshift.common.is_atomic | bool
- name: Upgrade node packages
- package: name={{ item }} state=present
- when: component == "node"
- with_items:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
-
-- name: Ensure python-yaml present for config upgrade
- package: name=PyYAML state=present
- when: not openshift.common.is_atomic | bool
+ package: name={{ node_pkgs | join(',') }} state=present
+ vars:
+ node_pkgs:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - PyYAML
+ when:
+ - component == "node"
+ - not openshift.common.is_atomic | bool
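The comment block explains the motivation: with_items drives one yum transaction per package, while joining the names into a single package call yields one transaction in which yum can resolve all pinned versions together. A contrast sketch with illustrative package names:

# Old pattern: one transaction per item, so yum resolves each package alone.
- name: Upgrade packages one transaction at a time (sketch)
  package: name={{ item }} state=present
  with_items:
    - example-pkg
    - example-pkg-node

# New pattern: one flattened transaction, all versions resolved together.
- name: Upgrade packages in a single transaction (sketch)
  package: name={{ pkgs | join(',') }} state=present
  vars:
    pkgs:
      - example-pkg
      - example-pkg-node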
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c6e799261..0ad934d2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,17 +2,6 @@
###############################################################################
# Upgrade Masters
###############################################################################
-- name: Evaluate additional groups for upgrade
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index e9f894942..4d455fe0a 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -34,6 +34,9 @@
- openshift_facts
- docker
- openshift_node_upgrade
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
- name: Set node schedulability
@@ -46,7 +49,3 @@
register: node_schedulable
until: node_schedulable|succeeded
when: node_unschedulable|changed
-
-- include: ../reset_excluder.yml
- tags:
- - always
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
index 88f2ddc78..83d2cec81 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -63,12 +63,12 @@
- block:
- debug:
msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}"
+ when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
- debug:
msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}"
+ when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
+ when: openshift_master_scheduler_predicates | default(none) is not none
# Handle cases where openshift_master_predicates is not defined
- block:
@@ -87,7 +87,7 @@
when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
- when: "{{ openshift_master_scheduler_predicates | default(none) is none }}"
+ when: openshift_master_scheduler_predicates | default(none) is none
# Upgrade priorities
@@ -120,12 +120,12 @@
- block:
- debug:
msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}"
+ when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
- debug:
msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}"
+ when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
+ when: openshift_master_scheduler_priorities | default(none) is not none
# Handle cases where openshift_master_priorities is not defined
- block:
@@ -144,7 +144,7 @@
when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
- when: "{{ openshift_master_scheduler_priorities | default(none) is none }}"
+ when: openshift_master_scheduler_priorities | default(none) is none
# Update scheduler
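The cleanup above repeatedly relies on block-level when: the outer condition gates every task in the block, so inner tasks carry only their own, more specific condition. A minimal runnable sketch with illustrative variables:

---
- name: Block-level when (sketch)
  hosts: localhost
  gather_facts: no
  vars:
    example_setting: "custom"   # illustrative
    example_default: "default"  # illustrative
  tasks:
    - block:
        - debug:
            msg: "WARNING: example_setting does not match the default"
          when: example_setting != example_default
      when: example_setting | default(none) is not none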
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index 68c71a132..d69472fad 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -53,7 +53,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "{{ 'admission_plugin_config' in openshift.master }}"
+ when: "'admission_plugin_config' in openshift.master"
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
new file mode 100644
index 000000000..d81a13ef2
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -0,0 +1,111 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
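The full upgrade wires version-specific config migrations into the shared playbooks through the master_config_hook and node_config_hook variables rather than hard-coding v3_3 paths there. A hedged sketch of the consuming side of that contract (hypothetical fragment; the real wiring lives in ../upgrade_control_plane.yml and ../upgrade_nodes.yml):

---
# Inside a shared upgrade play: run the version-specific hook
# only when the calling playbook supplied one.
- include: "{{ master_config_hook }}"
  when: master_config_hook is defined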
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
new file mode 100644
index 000000000..8a692d02b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -0,0 +1,111 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
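Both v3_3 playbooks derive the upgrade window from deployment_type: Origin used the 1.x version scheme while enterprise deployments used 3.x. A standalone sketch of that mapping (localhost play for illustration only):

---
- hosts: localhost
  gather_facts: no
  vars:
    deployment_type: origin
  tasks:
  - set_fact:
      openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
      openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
  - debug:
      msg: "upgrading from at least {{ openshift_upgrade_min }} to {{ openshift_upgrade_target }}"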
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
new file mode 100644
index 000000000..2d30bba94
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -0,0 +1,106 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
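Note that the node-only playbook builds openshift_no_proxy_internal_hostnames from oo_nodes_to_upgrade, where the full and control-plane playbooks use oo_nodes_to_config; only the hosts actually selected for upgrade are guaranteed to have facts at this point. The oo_select_keys and oo_collect filters are the repository's own; an equivalent expression using only core Jinja2 filters, with invented hostnames, would be:

---
- hosts: localhost
  gather_facts: no
  vars:
    sample_hostvars:
      node1: {openshift: {common: {hostname: node1.example.com}}}
      master1: {openshift: {common: {hostname: master1.example.com}}}
  tasks:
  # map(attribute='openshift.common.hostname') plays the role of
  # oo_collect over the selected hostvars entries.
  - debug:
      msg: "{{ sample_hostvars.values()
               | map(attribute='openshift.common.hostname')
               | join(',') }}"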
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
index 43c2ffcd4..ed89dbe8d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -3,7 +3,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "{{ 'admission_plugin_config' in openshift.master }}"
+ when: "'admission_plugin_config' in openshift.master"

- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
new file mode 100644
index 000000000..e9ff47f32
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -0,0 +1,109 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
new file mode 100644
index 000000000..d4ae8d8b4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -0,0 +1,111 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
new file mode 100644
index 000000000..ae205b172
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
new file mode 100644
index 000000000..1269634d1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -0,0 +1,113 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
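Two steps are new in the 3.5 flow compared with v3_3/v3_4 above: a version-specific validator.yml runs as the last pre_upgrade gate, and storage_upgrade.yml runs after post_control_plane.yml. A skeleton of what such a validator play looks like, modeled on the v3_6 validator that appears later in this diff (the task body here is a placeholder, not the real 3.5 checks):

---
- name: Verify 3.5 specific upgrade checks
  hosts: oo_first_master
  roles:
  - { role: lib_openshift }
  tasks:
  - name: placeholder; real checks are release-specific
    debug:
      msg: "pre-upgrade validation hook for this release"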
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
new file mode 100644
index 000000000..21c075678
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -0,0 +1,115 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
new file mode 100644
index 000000000..e67e169fc
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
new file mode 100644
index 000000000..a1b1f3301
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -0,0 +1,113 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
new file mode 100644
index 000000000..af6e1f71b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -0,0 +1,115 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
new file mode 100644
index 000000000..285c18b7b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
index ac5704f69..78c1767b8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
@@ -7,4 +7,6 @@
hosts: oo_first_master
roles:
- { role: lib_openshift }
- tasks: []
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
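oc_objectvalidator is provided by the lib_openshift role pulled in just above; called with no options, as here, it scans the cluster for invalid namespaces and SDN-related object errors. A hedged sketch of wrapping the same call to inspect its output rather than fail the play immediately (the register/ignore_errors handling is illustrative, not part of this commit):

---
- hosts: oo_first_master
  roles:
  - { role: lib_openshift }
  tasks:
  - name: Check for invalid namespaces and SDN errors
    oc_objectvalidator:
    register: object_report
    ignore_errors: yes   # illustrative: keep going so the report can be shown
  - debug:
      var: object_report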
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
new file mode 100644
index 000000000..1efdfb336
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -0,0 +1,23 @@
+---
+- name: Open firewall ports for GlusterFS
+ hosts: oo_glusterfs_to_config
+ vars:
+ os_firewall_allow:
+ - service: glusterfs_sshd
+ port: "2222/tcp"
+ - service: glusterfs_daemon
+ port: "24007/tcp"
+ - service: glusterfs_management
+ port: "24008/tcp"
+ - service: glusterfs_bricks
+ port: "49152-49251/tcp"
+ roles:
+ - role: os_firewall
+ when:
+ - openshift_storage_glusterfs_is_native | default(True)
+
+- name: Configure GlusterFS
+ hosts: oo_first_master
+ roles:
+ - role: openshift_storage_glusterfs
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
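The firewall play applies only to native deployments, where GlusterFS itself runs on cluster hosts; with an external cluster those ports are the storage administrator's concern. A hedged group_vars sketch for the external case (the variable name comes from the play's own when: clause):

---
# group_vars for the GlusterFS hosts, external-cluster case:
# skips the os_firewall play above entirely.
openshift_storage_glusterfs_is_native: False
# Ports the native play would otherwise open:
#   2222/tcp        glusterfs_sshd
#   24007/tcp       glusterfs_daemon
#   24008/tcp       glusterfs_management
#   49152-49251/tcp glusterfs_bricks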
diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml
new file mode 100644
index 000000000..80cf7529e
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/registry.yml
@@ -0,0 +1,49 @@
+---
+- include: config.yml
+
+- name: Initialize GlusterFS registry PV and PVC vars
+ hosts: oo_first_master
+ tags: hosted
+ tasks:
+ - set_fact:
+ glusterfs_pv: []
+ glusterfs_pvc: []
+
+ - set_fact:
+ glusterfs_pv:
+ - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
+ capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+ access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+ storage:
+ glusterfs:
+ endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
+ path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
+ glusterfs_pvc:
+ - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+ capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+ access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+ when: openshift.hosted.registry.storage.glusterfs.swap
+
+- name: Create persistent volumes
+ hosts: oo_first_master
+ tags:
+ - hosted
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
+
+- name: Create Hosted Resources
+ hosts: oo_first_master
+ tags:
+ - hosted
+ pre_tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ roles:
+ - role: openshift_hosted
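The PV/PVC pair is only populated when openshift.hosted.registry.storage.glusterfs.swap is true, i.e. when the hosted registry is being swapped onto GlusterFS-backed storage. A sketch of the fact shape the play reads, with invented values:

---
openshift:
  hosted:
    registry:
      storage:
        volume:
          name: registry          # yields registry-glusterfs-volume / -claim
          size: 10Gi
        access:
          modes:
          - ReadWriteMany
        glusterfs:
          endpoints: glusterfs-registry-endpoints
          path: glusterfs-registry-volume
          readOnly: false
          swap: true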
diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 92f16dc47..bc61ee9bb 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -51,7 +51,7 @@
changed_when: false

- name: Configure docker hosts
- hosts: oo_masters_to-config:oo_nodes_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_config
vars:
docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
@@ -60,9 +60,15 @@
- openshift_facts
- openshift_docker

-- include: ../openshift-cluster/disable_excluder.yml
+- name: Disable excluders
+ hosts: oo_masters_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"

- include: ../openshift-master/config.yml
@@ -70,6 +76,12 @@
- include: ../openshift-node/config.yml

-- include: ../openshift-cluster/reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
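Both scaleup playbooks drop the shared disable_excluder.yml/reset_excluder.yml includes in favor of inline plays, so each limits excluder handling to the hosts actually being scaled (masters here, nodes below). The role is driven by two variables; a hedged standalone sketch of a disable pass (the host pattern and service type value are illustrative, and the real plays take the service type from facts):

---
- name: Disable excluders before package work
  hosts: oo_masters_to_config
  gather_facts: no
  roles:
  - role: openshift_excluder
    # 'disable' lifts the yum excludes so packages may be upgraded;
    # 'enable' restores them once configuration is finished.
    r_openshift_excluder_action: disable
    # e.g. 'origin' or 'atomic-openshift'
    r_openshift_excluder_service_type: origin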
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml
new file mode 100644
index 000000000..0014a5dbd
--- /dev/null
+++ b/playbooks/common/openshift-node/network_manager.yml
@@ -0,0 +1,26 @@
+---
+- name: Install and configure NetworkManager
+ hosts: oo_all_hosts
+ become: yes
+ tasks:
+ - name: install NetworkManager
+ package:
+ name: 'NetworkManager'
+ state: present
+
+ - name: configure NetworkManager
+ lineinfile:
+ dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
+ regexp: '^{{ item }}='
+ line: '{{ item }}=yes'
+ state: present
+ create: yes
+ with_items:
+ - 'USE_PEERDNS'
+ - 'NM_CONTROLLED'
+
+ - name: enable and start NetworkManager
+ service:
+ name: 'NetworkManager'
+ state: started
+ enabled: yes
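After this play, NetworkManager should be running and the default interface marked USE_PEERDNS/NM_CONTROLLED. A hedged verification sketch using nmcli (illustrative only, not part of the playbook):

---
- hosts: oo_all_hosts
  become: yes
  tasks:
  - name: query NetworkManager run state
    command: nmcli -t -f RUNNING general
    register: nm_state
    changed_when: false
  - assert:
      that:
      - "'running' in nm_state.stdout"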
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
index c31aca62b..40da8990d 100644
--- a/playbooks/common/openshift-node/scaleup.yml
+++ b/playbooks/common/openshift-node/scaleup.yml
@@ -27,12 +27,24 @@
- openshift_facts
- openshift_docker

-- include: ../openshift-cluster/disable_excluder.yml
+- name: Disable excluders
+ hosts: oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"

- include: ../openshift-node/config.yml

-- include: ../openshift-cluster/reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"