-rwxr-xr-x  .redhat-ci.sh | 5
-rw-r--r--  .redhat-ci.yml | 2
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  README.md | 2
-rw-r--r--  callback_plugins/aa_version_requirement.py | 20
-rw-r--r--  docs/best_practices_guide.adoc | 4
-rw-r--r--  inventory/byo/hosts.origin.example | 9
-rw-r--r--  inventory/byo/hosts.ose.example | 9
-rw-r--r--  openshift-ansible.spec | 40
-rw-r--r--  playbooks/adhoc/create_pv/create_pv.yaml | 2
-rw-r--r--  playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 2
-rwxr-xr-x  playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml | 2
-rw-r--r--  playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml | 2
-rw-r--r--  playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 2
-rw-r--r--  playbooks/adhoc/uninstall.yml | 8
-rw-r--r--  playbooks/byo/openshift-cluster/initialize_groups.yml | 14
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml | 104
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 99
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 101
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml | 102
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 99
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 99
l---------  playbooks/byo/openshift-cluster/upgrades/v3_5/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml | 108
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 103
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 99
l---------  playbooks/byo/openshift-cluster/upgrades/v3_6/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml | 108
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 103
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 99
-rw-r--r--  playbooks/byo/openshift-preflight/check.yml | 4
-rw-r--r--  playbooks/byo/openshift_facts.yml | 2
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 95
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_excluder.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 28
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 2
l---------  playbooks/common/openshift-cluster/upgrades/v3_3/roles (renamed from playbooks/byo/openshift-cluster/upgrades/v3_3/roles) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 107
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 106
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml | 2
l---------  playbooks/common/openshift-cluster/upgrades/v3_4/roles (renamed from playbooks/byo/openshift-cluster/upgrades/v3_4/roles) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 105
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 104
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 115
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 104
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 115
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 104
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 2
-rw-r--r--  playbooks/common/openshift-node/network_manager.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 2
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_label.yml | 2
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_user.yml | 2
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 2
-rw-r--r--  roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py | 2
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 4
-rw-r--r--  roles/openshift_certificate_expiry/tasks/main.yml | 6
-rw-r--r--  roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py | 3
-rw-r--r--  roles/openshift_cloud_provider/tasks/openstack.yml | 2
-rw-r--r--  roles/openshift_excluder/tasks/verify_excluder.yml (renamed from playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml) | 2
-rw-r--r--  roles/openshift_excluder/tasks/verify_upgrade.yml | 15
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 2
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml | 12
-rw-r--r--  roles/openshift_hosted_metrics/tasks/install.yml | 2
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 4
-rw-r--r--  roles/openshift_logging/tasks/generate_routes.yaml | 6
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml | 4
-rw-r--r--  roles/openshift_logging/tasks/install_fluentd.yaml | 4
-rw-r--r--  roles/openshift_logging/tasks/install_mux.yaml | 4
-rw-r--r--  roles/openshift_logging/tasks/install_support.yaml | 41
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/start_cluster.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/stop_cluster.yaml | 2
-rw-r--r--  roles/openshift_master/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 4
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_heapster.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/start_metrics.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/stop_metrics.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml | 4
-rw-r--r--  roles/openshift_node/tasks/main.yml | 2
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml | 4
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 2
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 2
-rw-r--r--  utils/src/ooinstall/variants.py | 7
100 files changed, 1597 insertions(+), 1454 deletions(-)
diff --git a/.redhat-ci.sh b/.redhat-ci.sh
index 29d64e4d5..fce8c1d52 100755
--- a/.redhat-ci.sh
+++ b/.redhat-ci.sh
@@ -1,10 +1,9 @@
#!/bin/bash
set -xeuo pipefail
-# F25 currently has 2.2.1, so install from pypi
-pip install ansible==2.2.2.0
+pip install -r requirements.txt
-# do a simple ping to make sure the nodes are available
+# ping the nodes to check they're responding and register their ostree versions
ansible -vvv -i .redhat-ci.inventory nodes -a 'rpm-ostree status'
upload_journals() {
diff --git a/.redhat-ci.yml b/.redhat-ci.yml
index 887cc6ef0..6dac7b256 100644
--- a/.redhat-ci.yml
+++ b/.redhat-ci.yml
@@ -24,7 +24,7 @@ env:
OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.1
tests:
- - sh .redhat-ci.sh
+ - ./.redhat-ci.sh
artifacts:
- journals/
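Note: invoking the script as ./.redhat-ci.sh instead of sh .redhat-ci.sh relies on the executable bit and the #!/bin/bash shebang, both already in place here (mode 100755 in the index line of the previous hunk). A hypothetical guard, in case the bit were ever lost in a checkout:

    test -x .redhat-ci.sh || chmod +x .redhat-ci.sh   # already set per mode 100755
    ./.redhat-ci.sh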
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 9c522f18c..ebb0299ff 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.46-1 ./
+3.6.52-1 ./
diff --git a/README.md b/README.md
index 42e629484..1cf8d1156 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ you are not running a stable release.
***
Requirements:
- - Ansible >= 2.2.0
+ - Ansible >= 2.2.2.0
- Jinja >= 2.7
- pyOpenSSL
- python-lxml
diff --git a/callback_plugins/aa_version_requirement.py b/callback_plugins/aa_version_requirement.py
index f31445381..20bdd9056 100644
--- a/callback_plugins/aa_version_requirement.py
+++ b/callback_plugins/aa_version_requirement.py
@@ -7,7 +7,6 @@ The plugin is named with leading `aa_` to ensure this plugin is loaded
first (alphanumerically) by Ansible.
"""
import sys
-from subprocess import check_output
from ansible import __version__
if __version__ < '2.0':
@@ -30,13 +29,8 @@ else:
# Set to minimum required Ansible version
-REQUIRED_VERSION = '2.2.0.0'
-DESCRIPTION = "Supported versions: %s or newer (except 2.2.1.0)" % REQUIRED_VERSION
-FAIL_ON_2_2_1_0 = "There are known issues with Ansible version 2.2.1.0 which " \
- "are impacting OpenShift-Ansible. Please use Ansible " \
- "version 2.2.0.0 or a version greater than 2.2.1.0. " \
- "See this issue for more details: " \
- "https://github.com/openshift/openshift-ansible/issues/3111"
+REQUIRED_VERSION = '2.2.2.0'
+DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
def version_requirement(version):
@@ -64,13 +58,3 @@ class CallbackModule(CallbackBase):
'FATAL: Current Ansible version (%s) is not supported. %s'
% (__version__, DESCRIPTION), color='red')
sys.exit(1)
-
- if __version__ == '2.2.1.0':
- rpm_ver = str(check_output(["rpm", "-qa", "ansible"]))
- patched_ansible = '2.2.1.0-2'
-
- if patched_ansible not in rpm_ver:
- display(
- 'FATAL: Current Ansible version (%s) is not supported. %s'
- % (__version__, FAIL_ON_2_2_1_0), color='red')
- sys.exit(1)
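With the 2.2.1.0 special case and its RPM probe removed, the plugin's gate reduces to a single comparison against REQUIRED_VERSION. A minimal sketch of the resulting logic, assuming the body of version_requirement (elided by the hunk) is a plain string comparison like the rest of the plugin, and using stderr in place of Ansible's display helper:

    import sys
    from ansible import __version__

    REQUIRED_VERSION = '2.2.2.0'
    DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION

    def version_requirement(version):
        # Assumed implementation: lexicographic string comparison.
        return version >= REQUIRED_VERSION

    if not version_requirement(__version__):
        sys.stderr.write('FATAL: Current Ansible version (%s) is not supported. %s\n'
                         % (__version__, DESCRIPTION))
        sys.exit(1)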
diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc
index dd849e87d..4ecd535e4 100644
--- a/docs/best_practices_guide.adoc
+++ b/docs/best_practices_guide.adoc
@@ -493,12 +493,12 @@ The Ansible `package` module calls the associated package manager for the underl
# tasks.yml
- name: Install etcd (for etcdctl)
yum: name=etcd state=latest
- when: "ansible_pkg_mgr == yum"
+ when: ansible_pkg_mgr == yum
register: install_result
- name: Install etcd (for etcdctl)
dnf: name=etcd state=latest
- when: "ansible_pkg_mgr == dnf"
+ when: ansible_pkg_mgr == dnf
register: install_result
----
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index f70971537..d31c35f69 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -571,10 +571,17 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
+#
+# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
+# 172.17.0.0/16. Your installation will fail and/or your configuration change will
+# cause the Pod SDN or Cluster SDN to fail.
+#
+# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
+# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
+# environment variable located in /etc/sysconfig/docker-network.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
-
# ExternalIPNetworkCIDRs controls what values are acceptable for the
# service external IP field. If empty, no externalIP may be set. It
# may contain a list of CIDRs which are checked for access. If a CIDR
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index f5e0de1b0..80cc65f06 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -572,10 +572,17 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
+#
+# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
+# 172.17.0.0/16. Your installation will fail and/or your configuration change will
+# cause the Pod SDN or Cluster SDN to fail.
+#
+# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
+# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
+# environment variable located in /etc/sysconfig/docker-network.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
-
# ExternalIPNetworkCIDRs controls what values are acceptable for the
# service external IP field. If empty, no externalIP may be set. It
# may contain a list of CIDRs which are checked for access. If a CIDR
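The workaround described in the comment block (added identically to both example inventories) lives in /etc/sysconfig/docker-network. A minimal sketch of that file, using the example CIDR from the comment rather than a required value:

    # /etc/sysconfig/docker-network
    # Move docker0 off 172.17.0.0/16 so it cannot collide with the cluster SDN.
    DOCKER_NETWORK_OPTIONS='--bip=192.168.2.1/24'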
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 78e801313..9af1bd366 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.46
+Version: 3.6.52
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -17,7 +17,7 @@ URL: https://github.com/openshift/openshift-ansible
Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
BuildArch: noarch
-Requires: ansible >= 2.2.0.0-1
+Requires: ansible >= 2.2.2.0
Requires: python2
Requires: python-six
Requires: tar
@@ -273,6 +273,42 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed May 03 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.52-1
+- Making mux with_items list evaluate as empty if didnt get objects before
+ (ewolinet@redhat.com)
+- etcd Upgrade Refactor (rteague@redhat.com)
+- v3.3 Upgrade Refactor (rteague@redhat.com)
+- v3.4 Upgrade Refactor (rteague@redhat.com)
+- v3.5 Upgrade Refactor (rteague@redhat.com)
+- v3.6 Upgrade Refactor (rteague@redhat.com)
+- Fix variants for v3.6 (rteague@redhat.com)
+- Normalizing groups. (kwoodson@redhat.com)
+- Use openshift_ca_host's hostnames to sign the CA (sdodson@redhat.com)
+
+* Tue May 02 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.51-1
+- Remove std_include from playbooks/byo/rhel_subscribe.yml
+ (abutcher@redhat.com)
+- Adding way to add labels and nodeselectors to logging project
+ (ewolinet@redhat.com)
+
+* Tue May 02 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.50-1
+- Don't double quote when conditions (sdodson@redhat.com)
+- Remove jinja template delimeters from when conditions (sdodson@redhat.com)
+- move excluder upgrade validation tasks under openshift_excluder role
+ (jchaloup@redhat.com)
+- Fix test compatibility with OpenSSL 1.1.0 (pierre-
+ louis.bonicoli@libregerbil.fr)
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.49-1
+- Warn users about conflicts with docker0 CIDR range (lpsantil@gmail.com)
+- Bump ansible rpm dependency to 2.2.2.0 (sdodson@redhat.com)
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.48-1
+-
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.47-1
+-
+
* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.46-1
- Contrib: Hook to verify modules match assembled fragments
(tbielawa@redhat.com)
diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml
index 81c1ee653..64f861c6a 100644
--- a/playbooks/adhoc/create_pv/create_pv.yaml
+++ b/playbooks/adhoc/create_pv/create_pv.yaml
@@ -20,7 +20,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_volume_size
- cli_device_name
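This is the pattern repeated across the adhoc playbooks and uninstall.yml below: when: is already evaluated as a raw Jinja2 expression, so wrapping it in {{ }} delimiters forces a second templating pass that newer Ansible releases warn about. A minimal before/after illustration, reusing the cli_volume_size variable from this playbook:

    # Before: Jinja delimiters inside `when` trigger double templating
    - fail:
        msg: "This playbook requires cli_volume_size to be set."
      when: "{{ cli_volume_size is not defined or cli_volume_size == '' }}"

    # After: a bare expression, templated once by Ansible itself
    - fail:
        msg: "This playbook requires cli_volume_size to be set."
      when: cli_volume_size is not defined or cli_volume_size == ''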
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index f638fab83..507ac0f05 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -33,7 +33,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_tag_name
- cli_volume_size
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
index d988a28b0..3059d3dc5 100755
--- a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -24,7 +24,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_docker_device
diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
index b6dde357e..5e12cd181 100644
--- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
+++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
@@ -25,7 +25,7 @@
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_tag_name
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index 598f1966d..eb8440d1b 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -42,7 +42,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_tag_name
- cli_volume_size
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index ffdcd0ce1..beaf20b07 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -125,7 +125,7 @@
- name: Remove flannel package
package: name=flannel state=absent
when: openshift_use_flannel | default(false) | bool
- when: "{{ not is_atomic | bool }}"
+ when: not is_atomic | bool
- shell: systemctl reset-failed
changed_when: False
@@ -146,7 +146,7 @@
- lbr0
- vlinuxbr
- vovsbr
- when: "{{ openshift_remove_all | default(true) | bool }}"
+ when: openshift_remove_all | default(true) | bool
- shell: atomic uninstall "{{ item }}"-master-api
changed_when: False
@@ -239,7 +239,7 @@
changed_when: False
failed_when: False
with_items: "{{ images_to_delete.results }}"
- when: "{{ openshift_uninstall_images | default(True) | bool }}"
+ when: openshift_uninstall_images | default(True) | bool
- name: remove sdn drop files
file:
@@ -252,7 +252,7 @@
- /etc/sysconfig/openshift-node
- /etc/sysconfig/openvswitch
- /run/openshift-sdn
- when: "{{ openshift_remove_all | default(True) | bool }}"
+ when: openshift_remove_all | default(True) | bool
- find: path={{ item }} file_type=file
register: files
diff --git a/playbooks/byo/openshift-cluster/initialize_groups.yml b/playbooks/byo/openshift-cluster/initialize_groups.yml
index 2785dcc3b..2a725510a 100644
--- a/playbooks/byo/openshift-cluster/initialize_groups.yml
+++ b/playbooks/byo/openshift-cluster/initialize_groups.yml
@@ -8,17 +8,3 @@
- always
tasks:
- include_vars: cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: no
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: cluster_hosts.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 690b663f4..697a18c4d 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -4,106 +4,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index fca2c04f3..4d284c279 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -13,101 +13,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index d171ac3cd..180a2821f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -6,103 +6,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 217163802..8cce91b3f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -4,104 +4,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index d21c195bf..8e5d0f5f9 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -13,101 +13,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index 7bb66611c..d5329b858 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -6,101 +6,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles b/playbooks/byo/openshift-cluster/upgrades/v3_5/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
index f0900e04e..f44d55ad2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -4,110 +4,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
-# So it is necassary to run the play after running disable_excluder.yml.
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index e8d834a04..2377713fa 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -13,105 +13,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index c2a4debc8..5b3f6ab06 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -6,101 +6,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles b/playbooks/byo/openshift-cluster/upgrades/v3_6/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
index 763e79e01..40120b3e8 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -4,110 +4,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
-# So it is necessary to run the play after running disable_excluder.yml.
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 7a1377be2..408a4c631 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -13,105 +13,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 065746493..b5f42b804 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -6,101 +6,4 @@
#
- include: ../../initialize_groups.yml
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
-
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
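The v3_5 and v3_6 hunks above all apply the same refactor: each byo/ upgrade entry point drops its inlined pre-upgrade logic and becomes a thin wrapper around the matching per-version playbook under common/. A minimal sketch of the resulting wrapper shape, using the paths from this diff:

---
# byo wrapper after the refactor: normalize inventory groups, then delegate
# everything else to the shared per-version playbook.
- include: ../../initialize_groups.yml

- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml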
diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml
index c5f05d0f0..04a55308a 100644
--- a/playbooks/byo/openshift-preflight/check.yml
+++ b/playbooks/byo/openshift-preflight/check.yml
@@ -1,5 +1,7 @@
---
-- hosts: OSEv3
+- include: ../openshift-cluster/initialize_groups.yml
+
+- hosts: g_all_hosts
name: run OpenShift health checks
roles:
- openshift_health_checker
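The preflight check now targets g_all_hosts instead of the raw OSEv3 inventory group, with initialize_groups.yml providing the mapping. A hedged sketch of that mapping (the real definitions live in initialize_groups.yml and its cluster_hosts.yml vars, which are not part of this diff):

# Assumed shape only: derive the g_* groups from the legacy inventory groups
# so downstream plays can target one canonical set of names.
g_etcd_hosts: "{{ groups.etcd | default([]) }}"
g_master_hosts: "{{ groups.masters | default([]) }}"
g_node_hosts: "{{ groups.nodes | default([]) }}"
g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) }}"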
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 3b10323d6..75b606e61 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -8,7 +8,7 @@
- always
- name: Gather Cluster facts
- hosts: OSEv3
+ hosts: g_all_hosts
roles:
- openshift_facts
tasks:
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 777743def..aec87cf82 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -3,12 +3,8 @@
tags:
- always
-- include: ../common/openshift-cluster/std_include.yml
- tags:
- - always
-
- name: Subscribe hosts, update repos and update OS packages
- hosts: l_oo_all_hosts
+ hosts: g_all_hosts
roles:
- role: rhel_subscribe
when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 6aac70f63..17a177644 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -5,33 +5,40 @@
become: no
gather_facts: no
tasks:
- - fail:
+ - name: Evaluate groups - g_etcd_hosts required
+ fail:
msg: This playbook requires g_etcd_hosts to be set
- when: "{{ g_etcd_hosts is not defined }}"
+ when: g_etcd_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_master_hosts or g_new_master_hosts required
+ fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: "{{ g_master_hosts is not defined and g_new_master_hosts is not defined }}"
+ when: g_master_hosts is not defined and g_new_master_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_node_hosts or g_new_node_hosts required
+ fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}"
+ when: g_node_hosts is not defined and g_new_node_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_lb_hosts required
+ fail:
msg: This playbook requires g_lb_hosts to be set
- when: "{{ g_lb_hosts is not defined }}"
+ when: g_lb_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts required
+ fail:
msg: This playbook requires g_nfs_hosts to be set
- when: "{{ g_nfs_hosts is not defined }}"
+ when: g_nfs_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts is single host
+ fail:
msg: The nfs group must be limited to one host
- when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}"
+ when: (groups[g_nfs_hosts] | default([])) | length > 1
- - fail:
+ - name: Evaluate groups - g_glusterfs_hosts required
+ fail:
msg: This playbook requires g_glusterfs_hosts to be set
- when: "{{ g_glusterfs_hosts is not defined }}"
+ when: g_glusterfs_hosts is not defined
- name: Evaluate oo_all_hosts
add_host:
@@ -51,13 +58,13 @@
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
changed_when: no
- - name: Evaluate oo_etcd_to_config
+ - name: Evaluate oo_first_master
add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
+ name: "{{ g_master_hosts[0] }}"
+ groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_etcd_hosts | default([]) }}"
+ when: g_master_hosts|length > 0
changed_when: no
- name: Evaluate oo_masters_to_config
@@ -69,41 +76,59 @@
with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
changed_when: no
- - name: Evaluate oo_nodes_to_config
+ - name: Evaluate oo_etcd_to_config
add_host:
name: "{{ item }}"
- groups: oo_nodes_to_config
+ groups: oo_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
+ with_items: "{{ g_etcd_hosts | default([]) }}"
changed_when: no
- # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is set
- - name: Add master to oo_nodes_to_config
+ - name: Evaluate oo_first_etcd
add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
+ name: "{{ g_etcd_hosts[0] }}"
+ groups: oo_first_etcd
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_master_hosts | default([]) }}"
- when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}"
+ when: g_etcd_hosts|length > 0
changed_when: no
- - name: Evaluate oo_first_etcd
+ # We use two groups: one for the hosts we're upgrading, which doesn't include
+ # embedded etcd, and one for backing up, which does include the embedded etcd
+ # host. There's no need to upgrade embedded etcd; that happens automatically
+ # when the master is updated.
+ - name: Evaluate oo_etcd_hosts_to_upgrade
add_host:
- name: "{{ g_etcd_hosts[0] }}"
- groups: oo_first_etcd
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_upgrade
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
+ changed_when: False
+
+ - name: Evaluate oo_etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_backup
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+ changed_when: False
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- when: "{{ g_etcd_hosts|length > 0 }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
changed_when: no
- - name: Evaluate oo_first_master
+ # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is set
+ - name: Add master to oo_nodes_to_config
add_host:
- name: "{{ g_master_hosts[0] }}"
- groups: oo_first_master
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- when: "{{ g_master_hosts|length > 0 }}"
+ with_items: "{{ g_master_hosts | default([]) }}"
+ when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
changed_when: no
- name: Evaluate oo_lb_to_config
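A recurring cleanup in this hunk (and in several below) is dropping the Jinja delimiters from when: clauses. when: is already evaluated as a raw Jinja2 expression, so wrapping it in "{{ }}" is redundant and draws a "when statements should not include jinja2 templating delimiters" warning on newer Ansible. The pattern in isolation:

# Old style, warns on newer Ansible:
#   when: "{{ g_etcd_hosts is not defined }}"
# New style, identical semantics:
- fail:
    msg: This playbook requires g_etcd_hosts to be set
  when: g_etcd_hosts is not defined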
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 07b38920f..f4e52869e 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,13 +1,14 @@
---
# NOTE: requires openshift_facts be run
- name: Verify compatible yum/subscription-manager combination
- hosts: l_oo_all_hosts
+ hosts: oo_all_hosts
gather_facts: no
tasks:
# See:
# https://bugzilla.redhat.com/show_bug.cgi?id=1395047
# https://bugzilla.redhat.com/show_bug.cgi?id=1282961
# https://github.com/openshift/openshift-ansible/issues/1138
+ # Consider the repoquery module for this work
- name: Check for bad combinations of yum and subscription-manager
command: >
{{ repoquery_cmd }} --installed --qf '%{version}' "yum"
@@ -16,7 +17,7 @@
when: not openshift.common.is_atomic | bool
- fail:
msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
- when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
+ when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
index a30952929..02042c1ef 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
@@ -3,15 +3,10 @@
hosts: oo_masters_to_config:oo_nodes_to_config
gather_facts: no
tasks:
- - include: pre/validate_excluder.yml
- vars:
- excluder: "{{ openshift.common.service_type }}-docker-excluder"
- when: enable_docker_excluder | default(enable_excluders) | default(True) | bool
- - include: pre/validate_excluder.yml
- vars:
- excluder: "{{ openshift.common.service_type }}-excluder"
- when: enable_openshift_excluder | default(enable_excluders) | default(True) | bool
-
+ # verify the excluders can be upgraded
+ - include_role:
+ name: openshift_excluder
+ tasks_from: verify_upgrade
# disable excluders based on their status
- include_role:
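The excluder verification now enters the openshift_excluder role directly via include_role with tasks_from, replacing the two parameterized task includes. A minimal sketch of the construct (the role's verify_upgrade.yml internals are assumed, not shown in this diff):

- hosts: oo_masters_to_config:oo_nodes_to_config
  gather_facts: no
  tasks:
    # Runs roles/openshift_excluder/tasks/verify_upgrade.yml without invoking
    # the role's default entry point.
    - include_role:
        name: openshift_excluder
        tasks_from: verify_upgrade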
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index fb51a0061..9d0333ca8 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -1,6 +1,6 @@
---
- name: Backup etcd
- hosts: etcd_hosts_to_backup
+ hosts: oo_etcd_hosts_to_backup
vars:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec etcd etcdctl' }}"
@@ -87,10 +87,10 @@
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_select_keys(groups.oo_etcd_hosts_to_backup)
| oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- fail:
msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when: etcd_backup_failed | length > 0
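The backup gate works by having every backup host set etcd_backup_complete, then letting a localhost play diff the group against the hosts that reported in. Consolidating the fragments from this hunk into one sketch (oo_select_keys and oo_collect are this repository's own filter plugins):

- name: Gate on etcd backup
  hosts: localhost
  connection: local
  become: no
  tasks:
    - set_fact:
        etcd_backup_completed: "{{ hostvars
                                   | oo_select_keys(groups.oo_etcd_hosts_to_backup)
                                   | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
    - set_fact:
        etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
    - fail:
        msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
      when: etcd_backup_failed | length > 0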
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index fa86d29fb..73657d2f1 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -5,32 +5,6 @@
# mirrored packages on your own because only the GA and latest versions are
# available in the repos. So for Fedora we'll simply skip this, sorry.
-- include: ../../evaluate_groups.yml
- tags:
- - always
-
-# We use two groups one for hosts we're upgrading which doesn't include embedded etcd
-# The other for backing up which includes the embedded etcd host, there's no need to
-# upgrade embedded etcd that just happens when the master is updated.
-- name: Evaluate additional groups for etcd
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_upgrade
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_upgrade
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
- changed_when: False
-
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
-
- name: Backup etcd before upgrading anything
include: backup.yml
vars:
@@ -38,7 +12,7 @@
when: openshift_etcd_backup | default(true) | bool
- name: Drop etcdctl profiles
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- include: roles/etcd/tasks/etcdctl.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index a9b5b94e6..45e301315 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -1,6 +1,6 @@
---
- name: Determine etcd version
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- name: Record RPM based etcd version
command: rpm -qa --qf '%{version}' etcd\*
@@ -43,7 +43,7 @@
# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop
# through hosts, then loop through tasks only when appropriate
- name: Upgrade to 2.1
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '2.1'
@@ -52,7 +52,7 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade RPM hosts to 2.2
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '2.2'
@@ -61,7 +61,7 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade containerized hosts to 2.2.5
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: 2.2.5
@@ -70,7 +70,7 @@
when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
- name: Upgrade RPM hosts to 2.3
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '2.3'
@@ -79,7 +79,7 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade containerized hosts to 2.3.7
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: 2.3.7
@@ -88,7 +88,7 @@
when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
- name: Upgrade RPM hosts to 3.0
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '3.0'
@@ -97,7 +97,7 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade containerized hosts to etcd3 image
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: 3.0.15
@@ -106,7 +106,7 @@
when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
- name: Upgrade fedora to latest
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- include: fedora_tasks.yml
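Every step in this file follows one shape: a serial: 1 play over oo_etcd_hosts_to_upgrade whose tasks are guarded by a version_compare against the etcd version recorded at the top. One step, sketched; the included task file name is an assumption, since only the when: text is visible in this hunk:

- name: Upgrade RPM hosts to 2.2
  hosts: oo_etcd_hosts_to_upgrade
  serial: 1
  vars:
    upgrade_version: '2.2'
  tasks:
    # rhel_tasks.yml is assumed here; the guard matches the hunk above.
    - include: rhel_tasks.yml
      when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool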
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c6e799261..0ad934d2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,17 +2,6 @@
###############################################################################
# Upgrade Masters
###############################################################################
-- name: Evaluate additional groups for upgrade
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
index 88f2ddc78..83d2cec81 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -63,12 +63,12 @@
- block:
- debug:
msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}"
+ when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
- debug:
msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}"
+ when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
+ when: openshift_master_scheduler_predicates | default(none) is not none
# Handle cases where openshift_master_predicates is not defined
- block:
@@ -87,7 +87,7 @@
when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
- when: "{{ openshift_master_scheduler_predicates | default(none) is none }}"
+ when: openshift_master_scheduler_predicates | default(none) is none
# Upgrade priorities
@@ -120,12 +120,12 @@
- block:
- debug:
msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}"
+ when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
- debug:
msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}"
+ when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
+ when: openshift_master_scheduler_priorities | default(none) is not none
# Handle cases where openshift_master_priorities is not defined
- block:
@@ -144,7 +144,7 @@
when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
- when: "{{ openshift_master_scheduler_priorities | default(none) is none }}"
+ when: openshift_master_scheduler_priorities | default(none) is none
# Update scheduler
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index 68c71a132..d69472fad 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -53,7 +53,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "{{ 'admission_plugin_config' in openshift.master }}"
+ when: "'admission_plugin_config' in openshift.master"
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
index 6bc1a7aef..6bc1a7aef 120000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
new file mode 100644
index 000000000..be18c1edd
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -0,0 +1,107 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
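The version-specific playbooks parameterize the shared upgrade flow through hook variables: master_config_hook here, node_config_hook in the nodes playbook. A hedged sketch of how such a hook is typically consumed; the consuming side lives in the shared playbooks and is not part of this diff:

# Assumed consumption pattern inside the shared upgrade playbook:
- include: "{{ master_config_hook }}"
  when: master_config_hook is defined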
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
new file mode 100644
index 000000000..20dffb44b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -0,0 +1,111 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
new file mode 100644
index 000000000..14aaf70d6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -0,0 +1,106 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
index 43c2ffcd4..ed89dbe8d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -3,7 +3,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "{{ 'admission_plugin_config' in openshift.master }}"
+ when: "'admission_plugin_config' in openshift.master"
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
index 6bc1a7aef..6bc1a7aef 120000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
new file mode 100644
index 000000000..5d6455bef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -0,0 +1,105 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
new file mode 100644
index 000000000..c76920586
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -0,0 +1,111 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
new file mode 100644
index 000000000..f397f6015
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
new file mode 100644
index 000000000..7cedfb1ca
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -0,0 +1,111 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
+# So it is necessary to run the play after running disable_excluder.yml.
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
new file mode 100644
index 000000000..0198074ed
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -0,0 +1,115 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
new file mode 100644
index 000000000..2b16875f4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
new file mode 100644
index 000000000..4604bdc8b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -0,0 +1,111 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
+# So it is necessary to run the play after running disable_excluder.yml.
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
new file mode 100644
index 000000000..a09097ed9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -0,0 +1,115 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
new file mode 100644
index 000000000..7640f2116
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 92f16dc47..ab0045a39 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -51,7 +51,7 @@
changed_when: false
- name: Configure docker hosts
- hosts: oo_masters_to-config:oo_nodes_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_config
vars:
docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml
index be050c12c..0014a5dbd 100644
--- a/playbooks/common/openshift-node/network_manager.yml
+++ b/playbooks/common/openshift-node/network_manager.yml
@@ -1,6 +1,6 @@
---
- name: Install and configure NetworkManager
- hosts: l_oo_all_hosts
+ hosts: oo_all_hosts
become: yes
tasks:
- name: install NetworkManager
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 78581fdfe..ccd29be29 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -14,7 +14,7 @@
url: '{{ image_url }}'
sha256sum: '{{ image_sha256 }}'
dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}'
- when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'
+ when: ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"]
register: downloaded_image
- name: Uncompress xz compressed base cloud image
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 8853740e5..35117225b 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -84,7 +84,7 @@
daemon_reload: yes
when: not openshift.common.is_etcd_system_container | bool
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
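+    # Conditionals like failed_when are raw Jinja expressions, so the quoting
+    # (and the "{{ }}" wrappers removed elsewhere in this commit) is unnecessary.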
- name: Install etcd container service file
template:
diff --git a/roles/lib_openshift/src/test/integration/oc_label.yml b/roles/lib_openshift/src/test/integration/oc_label.yml
index b4e721407..22cf687c5 100755
--- a/roles/lib_openshift/src/test/integration/oc_label.yml
+++ b/roles/lib_openshift/src/test/integration/oc_label.yml
@@ -15,7 +15,7 @@
- name: ensure needed vars are defined
fail:
msg: "{{ item }} not defined"
- when: "{{ item }} is not defined"
+ when: item is not defined
with_items:
- cli_master_test # ansible inventory instance to run playbook against
diff --git a/roles/lib_openshift/src/test/integration/oc_user.yml b/roles/lib_openshift/src/test/integration/oc_user.yml
index ad1f9d188..9b4290052 100755
--- a/roles/lib_openshift/src/test/integration/oc_user.yml
+++ b/roles/lib_openshift/src/test/integration/oc_user.yml
@@ -14,7 +14,7 @@
- name: ensure needed vars are defined
fail:
msg: "{{ item }} no defined"
- when: "{{ item}} is not defined"
+ when: item is not defined
with_items:
- cli_master_test # ansible inventory instance to run playbook against
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 3b17d9ed6..c7b906949 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -95,7 +95,7 @@
{% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %}
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
- --hostnames={{ openshift.common.all_hostnames | join(',') }}
+ --hostnames={{ hostvars[openshift_ca_host].openshift.common.all_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_ca_config_dir }}
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
index 5f102e960..577a14b9a 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
@@ -35,7 +35,7 @@ Example playbook usage:
become: no
run_once: yes
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
copy:
content: "{{ hostvars|oo_cert_expiry_results_to_json() }}"
dest: "{{ openshift_certificate_expiry_json_results_path }}"
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index c204b5341..0242f5b43 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -135,7 +135,7 @@ platforms missing the Python OpenSSL library.
continue
elif l.startswith('Subject:'):
- # O=system:nodes, CN=system:node:m01.example.com
+ # O = system:nodes, CN = system:node:m01.example.com
self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1])
def get_serial_number(self):
@@ -202,7 +202,7 @@ object"""
"""
self.subjects = []
for s in subject_string.split(', '):
- name, _, value = s.partition('=')
+ name, _, value = s.partition(' = ')
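+            # e.g. "CN = system:node:m01.example.com" -> ("CN", "system:node:m01.example.com")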
self.subjects.append((name, value))
def get_components(self):
diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml
index 139d5de6e..b5234bd1e 100644
--- a/roles/openshift_certificate_expiry/tasks/main.yml
+++ b/roles/openshift_certificate_expiry/tasks/main.yml
@@ -13,12 +13,12 @@
src: cert-expiry-table.html.j2
dest: "{{ openshift_certificate_expiry_html_report_path }}"
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_generate_html_report|bool }}"
+ when: openshift_certificate_expiry_generate_html_report|bool
- name: Generate the result JSON string
run_once: yes
set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
- name: Generate results JSON file
become: no
@@ -27,4 +27,4 @@
src: save_json_results.j2
dest: "{{ openshift_certificate_expiry_json_results_path }}"
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
index ccdd48fa8..8a521a765 100644
--- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
+++ b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
@@ -17,7 +17,8 @@ from openshift_cert_expiry import FakeOpenSSLCertificate # noqa: E402
@pytest.fixture(scope='module')
def fake_valid_cert(valid_cert):
- cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text']
+ cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text',
+ '-nameopt', 'oneline']
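+    # 'oneline' prints subjects as "O = x, CN = y", matching the spacing the fake
+    # certificate parser above now splits on.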
cert = subprocess.check_output(cmd)
return FakeOpenSSLCertificate(cert.decode('utf8'))
diff --git a/roles/openshift_cloud_provider/tasks/openstack.yml b/roles/openshift_cloud_provider/tasks/openstack.yml
index f22dd4520..5788e6d74 100644
--- a/roles/openshift_cloud_provider/tasks/openstack.yml
+++ b/roles/openshift_cloud_provider/tasks/openstack.yml
@@ -7,4 +7,4 @@
template:
dest: "{{ openshift.common.config_base }}/cloudprovider/openstack.conf"
src: openstack.conf.j2
- when: "openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)"
+ when: openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml
index 6de1ed061..24a05d56e 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
+++ b/roles/openshift_excluder/tasks/verify_excluder.yml
@@ -11,7 +11,7 @@
failed_when: false
changed_when: false
- - name: Docker excluder version detected
+ - name: "{{ excluder }} version detected"
debug:
msg: "{{ excluder }}: {{ excluder_version.stdout }}"
diff --git a/roles/openshift_excluder/tasks/verify_upgrade.yml b/roles/openshift_excluder/tasks/verify_upgrade.yml
new file mode 100644
index 000000000..6ea2130ac
--- /dev/null
+++ b/roles/openshift_excluder/tasks/verify_upgrade.yml
@@ -0,0 +1,15 @@
+---
+# input variables
+# - repoquery_cmd
+# - openshift_upgrade_target
+- include: init.yml
+
+- include: verify_excluder.yml
+ vars:
+ excluder: "{{ openshift.common.service_type }}-docker-excluder"
+ when: docker_excluder_on
+
+- include: verify_excluder.yml
+ vars:
+ excluder: "{{ openshift.common.service_type }}-excluder"
+ when: openshift_excluder_on
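+
+# docker_excluder_on / openshift_excluder_on are expected to be set by the
+# preceding init.yml include.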
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 00603f4fa..4cb5418c6 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -6,7 +6,7 @@
- name: Determine if growpart is installed
command: "rpm -q cloud-utils-growpart"
register: has_growpart
- failed_when: "has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout"
+  failed_when: has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
changed_when: false
when: openshift.common.is_containerized | bool
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index afd82766f..78b624109 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -36,7 +36,7 @@
command: >
{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
register: secret_output
- failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
+ failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr
- name: "Create templates for logging accounts and the deployer"
command: >
@@ -60,21 +60,21 @@
{{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
register: permiss_output
- failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
+ failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr
- name: "Set permissions for fluentd"
command: >
{{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
- name: "Set additional permissions for fluentd"
command: >
{{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
command: >
@@ -82,13 +82,13 @@
policy add-cluster-role-to-user rolebinding-reader \
system:serviceaccount:logging:aggregated-logging-elasticsearch
register: rolebinding_reader_output
- failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
+  failed_when: rolebinding_reader_output.rc == 1 and 'exists' not in rolebinding_reader_output.stderr
- name: "Create ConfigMap for deployer parameters"
command: >
{{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
register: deployer_configmap_output
- failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
+ failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr
- name: "Process the deployer template"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
diff --git a/roles/openshift_hosted_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml
index 6a442cefc..15dd1bd54 100644
--- a/roles/openshift_hosted_metrics/tasks/install.yml
+++ b/roles/openshift_hosted_metrics/tasks/install.yml
@@ -81,7 +81,7 @@
secrets new metrics-deployer nothing=/dev/null
register: metrics_deployer_secret
changed_when: metrics_deployer_secret.rc == 0
- failed_when: "metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr"
+ failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr
# TODO: extend this to allow user passed in certs or generating cert with
# OpenShift CA
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 5ee8d1e2a..dbf5b4d3d 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -3,6 +3,10 @@ openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | def
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
openshift_logging_namespace: logging
+openshift_logging_nodeselector: null
+openshift_logging_labels: {}
+openshift_logging_label_key: ""
+openshift_logging_label_value: ""
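+# Example (hypothetical) inventory overrides:
+#   openshift_logging_nodeselector: {"region": "infra"}
+#   openshift_logging_labels: {"team": "ops"}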
openshift_logging_install_logging: True
openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
index e77da7a24..f76bb3a0a 100644
--- a/roles/openshift_logging/tasks/generate_routes.yaml
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -1,14 +1,14 @@
---
- set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }}
- when: "{{ openshift_logging_kibana_key | trim | length > 0 }}"
+ when: openshift_logging_kibana_key | trim | length > 0
changed_when: false
- set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode }}
- when: "{{openshift_logging_kibana_cert | trim | length > 0}}"
+ when: openshift_logging_kibana_cert | trim | length > 0
changed_when: false
- set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode }}
- when: "{{openshift_logging_kibana_ca | trim | length > 0}}"
+ when: openshift_logging_kibana_ca | trim | length > 0
changed_when: false
- set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }}
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index b80f37892..878010eb0 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -3,7 +3,7 @@
set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
- set_fact: openshift_logging_es_pvc_prefix="logging-es"
- when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''"
+ when: not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''
### evaluate if the PVC attached to the dc currently matches the provided vars
## if it does then we reuse that pvc in the DC
@@ -65,7 +65,7 @@
check_mode: no
- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
- when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''"
+ when: not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''
- include: set_es_storage.yaml
vars:
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
index 35273829c..6bc405819 100644
--- a/roles/openshift_logging/tasks/install_fluentd.yaml
+++ b/roles/openshift_logging/tasks/install_fluentd.yaml
@@ -32,7 +32,7 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
check_mode: no
when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
@@ -49,6 +49,6 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
check_mode: no
when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml
index 296da626f..91eeb95a1 100644
--- a/roles/openshift_logging/tasks/install_mux.yaml
+++ b/roles/openshift_logging/tasks/install_mux.yaml
@@ -45,7 +45,7 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: mux_output
- failed_when: "mux_output.rc == 1 and 'exists' not in mux_output.stderr"
+ failed_when: mux_output.rc == 1 and 'exists' not in mux_output.stderr
check_mode: no
when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
@@ -62,6 +62,6 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: mux2_output
- failed_when: "mux2_output.rc == 1 and 'exists' not in mux2_output.stderr"
+ failed_when: mux2_output.rc == 1 and 'exists' not in mux2_output.stderr
check_mode: no
when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml
index da0bbb627..877ce3149 100644
--- a/roles/openshift_logging/tasks/install_support.yaml
+++ b/roles/openshift_logging/tasks/install_support.yaml
@@ -1,17 +1,36 @@
---
# This is the base configuration for installing the other components
-- name: Check for logging project already exists
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers
- register: logging_project_result
- ignore_errors: yes
- when: not ansible_check_mode
- changed_when: no
+- name: Set logging project
+ oc_project:
+ state: present
+ name: "{{ openshift_logging_namespace }}"
+    node_selector: "{{ openshift_logging_nodeselector | default(none) }}"
+
+- name: Labelling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_dict: "{{ openshift_logging_labels | default({}) }}"
+ when:
+ - openshift_logging_labels is defined
+ - openshift_logging_labels is dict
-- name: "Create logging project"
- command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
- when: not ansible_check_mode and "not found" in logging_project_result.stderr
+- name: Labelling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ openshift_logging_label_key }}"
+ value: "{{ openshift_logging_label_value }}"
+ when:
+ - openshift_logging_label_key is defined
+ - openshift_logging_label_key != ""
+ - openshift_logging_label_value is defined
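+
+# Two labelling paths: one consumes the openshift_logging_labels dict, the other
+# the single openshift_logging_label_key/value pair added to defaults/main.yml above.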
- name: Create logging cert directory
file: path={{openshift.common.config_base}}/logging state=directory mode=0755
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index c7f4a2f93..387da618d 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -1,7 +1,7 @@
---
- fail:
msg: Only one Fluentd nodeselector key pair should be provided
- when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+ when: openshift_logging_fluentd_nodeselector.keys() | count > 1
- name: Set default image variables based on deployment_type
include_vars: "{{ item }}"
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index ef9a42a9e..7fec5db42 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -36,7 +36,7 @@
name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
replicas: "{{ openshift_logging_mux_replica_count | default (1) }}"
- with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ with_items: "{{ mux_dc.results.results[0]['items'] if 'results' in mux_dc else {} | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when:
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index d20c57cc1..c078e4b2f 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -36,7 +36,7 @@
name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
replicas: 0
- with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ with_items: "{{ mux_dc.results.results[0]['items'] if 'results' in mux_dc else {} | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_mux
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 98e0da1a2..5522fef26 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -194,7 +194,7 @@
state: stopped
when: openshift_master_ha | bool
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- set_fact:
master_service_status_changed: "{{ start_result | changed }}"
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 6f8f09b22..f048e0aef 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -128,10 +128,10 @@
- name: Test if scheduler config is readable
fail:
msg: "Unknown scheduler config apiVersion {{ openshift_master_scheduler_config.apiVersion }}"
- when: "{{ openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1' }}"
+ when: openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1'
- name: Set current scheduler predicates and priorities
set_fact:
openshift_master_scheduler_current_predicates: "{{ openshift_master_scheduler_current_config.predicates }}"
openshift_master_scheduler_current_priorities: "{{ openshift_master_scheduler_current_config.priorities }}"
- when: "{{ scheduler_config_stat.stat.exists }}"
+ when: scheduler_config_stat.stat.exists
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index a467c1a51..3b4e8560f 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -23,7 +23,7 @@
changed_when: false
- set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
- when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
+ when: not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''
- name: generate hawkular-cassandra persistent volume claims
template:
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index d13b96be1..0eb852d91 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -22,7 +22,7 @@
with_items:
- hawkular-metrics-certs
- hawkular-metrics-account
- when: "not {{ openshift_metrics_heapster_standalone | bool }}"
+ when: not openshift_metrics_heapster_standalone | bool
- name: Generating serviceaccount for heapster
template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-sa.yaml
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index ffe6f63a2..74eb56713 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -10,11 +10,11 @@
- cassandra
loop_control:
loop_var: include_file
- when: "not {{ openshift_metrics_heapster_standalone | bool }}"
+ when: not openshift_metrics_heapster_standalone | bool
- name: Install Heapster Standalone
include: install_heapster.yaml
- when: "{{ openshift_metrics_heapster_standalone | bool }}"
+ when: openshift_metrics_heapster_standalone | bool
- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
register: object_def_files
@@ -48,7 +48,7 @@
- name: Scaling down cluster to recognize changes
include: stop_metrics.yaml
- when: "{{ existing_metrics_rc.stdout_lines | length > 0 }}"
+ when: existing_metrics_rc.stdout_lines | length > 0
- name: Scaling up cluster
include: start_metrics.yaml
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index c8d222c60..e8b7bea5c 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -19,7 +19,7 @@
- name: Create temp directory for all our templates
file: path={{mktemp.stdout}}/templates state=directory mode=0755
changed_when: False
- when: "{{ openshift_metrics_install_metrics | bool }}"
+ when: openshift_metrics_install_metrics | bool
- name: Create temp directory local on control node
local_action: command mktemp -d
diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml
index b5a1c8f06..2037e8dc3 100644
--- a/roles/openshift_metrics/tasks/start_metrics.yaml
+++ b/roles/openshift_metrics/tasks/start_metrics.yaml
@@ -20,7 +20,7 @@
loop_control:
loop_var: object
when: metrics_cassandra_rc is defined
- changed_when: "{{metrics_cassandra_rc | length > 0 }}"
+ changed_when: metrics_cassandra_rc | length > 0
- command: >
{{openshift.common.client_binary}}
@@ -42,7 +42,7 @@
with_items: "{{metrics_metrics_rc.stdout_lines}}"
loop_control:
loop_var: object
- changed_when: "{{metrics_metrics_rc | length > 0 }}"
+ changed_when: metrics_metrics_rc | length > 0
- command: >
{{openshift.common.client_binary}}
diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml
index f69bb0f11..9a2ce9267 100644
--- a/roles/openshift_metrics/tasks/stop_metrics.yaml
+++ b/roles/openshift_metrics/tasks/stop_metrics.yaml
@@ -41,7 +41,7 @@
with_items: "{{metrics_hawkular_rc.stdout_lines}}"
loop_control:
loop_var: object
- changed_when: "{{metrics_hawkular_rc | length > 0 }}"
+ changed_when: metrics_hawkular_rc | length > 0
- command: >
{{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
@@ -63,4 +63,4 @@
loop_control:
loop_var: object
when: metrics_cassandra_rc is defined
- changed_when: "{{metrics_cassandra_rc | length > 0 }}"
+ changed_when: metrics_cassandra_rc | length > 0
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 8a6be6237..9a5d52eb6 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -8,7 +8,7 @@
delete --ignore-not-found --selector=metrics-infra
all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
register: delete_metrics
- changed_when: "delete_metrics.stdout != 'No resources found'"
+ changed_when: delete_metrics.stdout != 'No resources found'
- name: remove rolebindings
command: >
@@ -16,4 +16,4 @@
delete --ignore-not-found
rolebinding/hawkular-view
clusterrolebinding/heapster-cluster-reader
- changed_when: "delete_metrics.stdout != 'No resources found'"
+ changed_when: delete_metrics.stdout != 'No resources found'
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index d7fea2f32..656874f56 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -147,7 +147,7 @@
- regex: '^AWS_SECRET_ACCESS_KEY='
line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
no_log: True
- when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"
+ when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
notify:
- restart node
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index 57279c665..b53b6afa1 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -65,6 +65,6 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
register: efs_output
- failed_when: "efs_output.rc == 1 and 'exists' not in efs_output.stderr"
+ failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
check_mode: no
when: efs_anyuid.stdout.find("system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs") == -1
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 9f092d5d5..6d02d2090 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -45,4 +45,4 @@
- name: Create GlusterFS registry volume
command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
- when: "'{{ openshift.hosted.registry.storage.glusterfs.path }}' not in registry_volume.stdout"
+ when: "'openshift.hosted.registry.storage.glusterfs.path' not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 84b85e95d..778b5a673 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -14,7 +14,7 @@
# Need `command` here because heketi-storage.json contains multiple objects.
- name: Copy heketi DB to GlusterFS volume
command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}"
- when: "setup_storage.rc == 0"
+ when: setup_storage.rc == 0
- name: Wait for copy job to finish
oc_obj:
@@ -34,7 +34,7 @@
- "heketi_job.results.results | count > 0"
# Fail when pod's 'Failed' status is True
- "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
- when: "setup_storage.rc == 0"
+ when: setup_storage.rc == 0
- name: Delete deploy resources
oc_obj:
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index 265a3cc6e..71c4a2732 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -163,7 +163,7 @@
- name: Load heketi topology
command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
register: topology_load
- failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+ failed_when: topology_load.rc != 0 or 'Unable' in topology_load.stdout
when:
- openshift_storage_glusterfs_is_native
- openshift_storage_glusterfs_heketi_topology_load
@@ -172,7 +172,7 @@
when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing
- include: glusterfs_registry.yml
- when: "openshift.hosted.registry.storage.kind == 'glusterfs'"
+ when: openshift.hosted.registry.storage.kind == 'glusterfs'
- name: Delete temp directory
file:
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 4b2979887..509655b0c 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -14,7 +14,7 @@
- iptables
- ip6tables
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- name: Wait 10 seconds after disabling iptables
pause:
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 38ea2477c..55f2fc471 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -7,7 +7,7 @@
enabled: no
masked: yes
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- name: Wait 10 seconds after disabling firewalld
pause:
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index f25266f29..1574d447a 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -39,18 +39,19 @@ class Variant(object):
# WARNING: Keep the versions ordered, most recent first:
OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', [
- Version('3.5', 'openshift-enterprise'),
+ Version('3.6', 'openshift-enterprise'),
])
REG = Variant('openshift-enterprise', 'Registry', [
- Version('3.4', 'openshift-enterprise', 'registry'),
+ Version('3.6', 'openshift-enterprise', 'registry'),
])
origin = Variant('origin', 'OpenShift Origin', [
- Version('1.4', 'origin'),
+ Version('3.6', 'origin'),
])
LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', [
+ Version('3.5', 'openshift-enterprise'),
Version('3.4', 'openshift-enterprise'),
Version('3.3', 'openshift-enterprise'),
Version('3.2', 'openshift-enterprise'),