Diffstat (limited to 'playbooks/common/openshift-cluster/upgrades')
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml | 17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/restart.yml | 27
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml | 33
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 94
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml | 46
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml | 23
l---------  playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 44
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml | 20
l---------  playbooks/common/openshift-cluster/upgrades/etcd/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 94
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check | 193
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 2
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 102
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 45
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml | 166
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml | 16
26 files changed, 634 insertions(+), 346 deletions(-)
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
index 439df5ffd..9f7961614 100644
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
@@ -1,9 +1,14 @@
+---
+# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
+# restart services. We will unconditionally restart all containerized services
+# because we have to unconditionally restart Docker:
+- set_fact:
+ skip_node_svc_handlers: True
+
- name: Update systemd units
include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
-- name: Verifying the correct version was configured
- shell: grep {{ verify_upgrade_version }} {{ item }}
- with_items:
- - /etc/sysconfig/openvswitch
- - /etc/sysconfig/{{ openshift.common.service_type }}*
- when: verify_upgrade_version is defined
+# This is a no-op because of skip_node_svc_handlers, but it lets us flush handlers
+# before the end of the play, when the node has already been marked schedulable
+# again. (Otherwise this would look strange in the logs.)
+- meta: flush_handlers
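
The hack above only works if the handlers included from systemd_units.yml honor the flag. A minimal sketch of such a guarded handler, assuming the handler name and body (only the skip_node_svc_handlers variable comes from this playbook):

    - name: restart node
      systemd:
        name: "{{ openshift.common.service_type }}-node"
        state: restarted
      when: not skip_node_svc_handlers | default(False) | bool
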
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
new file mode 100644
index 000000000..1b418920f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
@@ -0,0 +1,27 @@
+---
+- name: Restart docker
+ service: name=docker state=restarted
+
+- name: Update docker facts
+ openshift_facts:
+ role: docker
+
+- name: Restart containerized services
+ service: name={{ item }} state=started
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- name: Wait for master API to come back online
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
+ when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 417096dd0..17f8fc6e9 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -20,7 +20,7 @@
- debug: var=docker_image_count.stdout
- name: Remove all containers and images
- script: nuke_images.sh docker
+ script: nuke_images.sh
register: nuke_images_result
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
@@ -35,32 +35,7 @@
- service: name=docker state=stopped
- name: Upgrade Docker
- action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version }} state=present"
+ package: name=docker{{ '-' + docker_version }} state=present
-- service: name=docker state=started
-
-- name: Update docker facts
- openshift_facts:
- role: docker
-
-- name: Restart containerized services
- service: name={{ item }} state=started
- with_items:
- - etcd_container
- - openvswitch
- - "{{ openshift.common.service_type }}-master"
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- failed_when: false
- when: openshift.common.is_containerized | bool
-
-- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
- when: inventory_hostname in groups.oo_masters_to_config
+- include: restart.yml
+ when: not skip_docker_restart | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index caf80b358..b2a2eac9a 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -9,6 +9,8 @@
- name: Check if Docker is installed
command: rpm -q docker
+ args:
+ warn: no
register: pkg_check
failed_when: pkg_check.rc > 1
changed_when: no
@@ -48,5 +50,5 @@
- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
set_fact:
- docker_upgrade_nuke_images: True
+ docker_upgrade_nuke_images: True
when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
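
For reference, the boundary check in the when clause above combines two version_compare tests; a minimal sketch with hypothetical versions (1.9.1 installed, 1.10.3 available):

    - debug:
        msg: "nuke images: {{ '1.9.1' | version_compare('1.10', '<') and '1.10.3' | version_compare('1.10', '>=') }}"
      # => True, so the flag is set when crossing the 1.10 boundary
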
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
new file mode 100644
index 000000000..d0eadf1fc
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -0,0 +1,94 @@
+---
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+ # We assume the data dir is used for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} KB disk space required for etcd backup,
+ {{ avail_disk.stdout }} KB available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ # For non-containerized and non-embedded deployments we should already have
+ # the correct version of etcd installed, so don't do anything.
+ #
+ # For embedded or containerized deployments we need the latest, because OCP 3.3
+ # uses a version of etcd that can only be backed up with etcd-3.x, and if it's
+ # containerized then the etcd version may be newer than the one on the host, so
+ # upgrade it.
+ #
+ # On Atomic we have neither yum nor dnf, so Ansible throws a hard-to-debug error
+ # if you use the package module there, like this: "Could not find a module for unknown."
+ # See https://bugzilla.redhat.com/show_bug.cgi?id=1408668
+ #
+ # TODO - We should refactor all containerized backups to use the containerized
+ # version of etcd to perform the backup rather than relying on the host's
+ # binaries. Until we do that, we'll continue to have problems backing up etcd
+ # when Atomic Host has an older version than the one running in the container,
+ # whether etcd is embedded or not.
+ - name: Install latest etcd for containerized or embedded
+ package:
+ name: etcd
+ state: latest
+ when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic
+
+ - name: Generate etcd backup
+ command: >
+ {{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}
+
+ - set_fact:
+ etcd_backup_complete: True
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}"
+
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
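
The gate above works by collecting the inventory names of hosts that set etcd_backup_complete and diffing them against the backup group. A sketch of the difference step with hypothetical hosts etcd1 and etcd2, where only etcd1 completed:

    - debug:
        msg: "{{ ['etcd1', 'etcd2'] | difference(['etcd1']) }}"
      # => ['etcd2'], so the fail task aborts the upgrade
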
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
new file mode 100644
index 000000000..5f8b59e17
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
@@ -0,0 +1,46 @@
+---
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Get current image
+ shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}'
+ register: current_image
+
+- name: Set new_etcd_image
+ set_fact:
+ new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}"
+
+- name: Pull new etcd image
+ command: "docker pull {{ new_etcd_image }}"
+
+- name: Update to latest etcd image
+ replace:
+ dest: /etc/systemd/system/etcd_container.service
+ regexp: "{{ current_image.stdout }}$"
+ replace: "{{ new_etcd_image }}"
+
+- name: Restart etcd_container
+ systemd:
+ name: etcd_container
+ daemon_reload: yes
+ state: restarted
+
+## TODO: this should probably just move into the backup playbooks; it will also
+## fail on Atomic Host. We need to revisit how to do etcd backups there, as the
+## container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1).
+- name: Upgrade etcd for etcdctl when not atomic
+ package: name=etcd state=latest
+ when: not openshift.common.is_atomic | bool
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
+
+- name: Store new etcd_image
+ openshift_facts:
+ role: etcd
+ local_facts:
+ etcd_image: "{{ new_etcd_image }}"
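
The regex_replace used for new_etcd_image swaps only the image/tag suffix of whatever path ExecStart references; a sketch with a hypothetical registry path:

    - debug:
        msg: "{{ 'registry.access.redhat.com/rhel7/etcd:2.2.5' | regex_replace('/etcd.*$', '/etcd:' ~ upgrade_version) }}"
      vars:
        upgrade_version: 3.0.15
      # => registry.access.redhat.com/rhel7/etcd:3.0.15
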
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
new file mode 100644
index 000000000..30232110e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
@@ -0,0 +1,23 @@
+---
+# F23 GA'd with etcd 2.0, currently has 2.2 in updates
+# F24 GA'd with etcd-2.2, currently has 2.2 in updates
+# F25 Beta currently has etcd 3.0
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Update etcd
+ package:
+ name: "etcd"
+ state: "latest"
+
+- name: Restart etcd
+ service:
+ name: etcd
+ state: restarted
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
new file mode 120000
index 000000000..641e04e44
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
@@ -0,0 +1 @@
+../roles/etcd/files/etcdctl.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
new file mode 100644
index 000000000..8268adc2e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -0,0 +1,44 @@
+---
+# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. The etcd docs say to
+# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedious
+# task on RHEL and CentOS, it's simply not possible on Fedora unless you've
+# mirrored packages on your own, because only the GA and latest versions are
+# available in the repos. So for Fedora we'll simply skip this, sorry.
+
+- include: ../../evaluate_groups.yml
+ tags:
+ - always
+
+# We use two groups: one for the hosts we're upgrading, which doesn't include embedded
+# etcd, and one for backing up, which does include the embedded etcd host. There's no
+# need to upgrade embedded etcd separately; that just happens when the master is updated.
+- name: Evaluate additional groups for etcd
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: Evaluate etcd_hosts_to_upgrade
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_upgrade
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+
+- name: Backup etcd before upgrading anything
+ include: backup.yml
+ vars:
+ backup_tag: "pre-upgrade-"
+ when: openshift_etcd_backup | default(true) | bool
+
+- name: Drop etcdctl profiles
+ hosts: etcd_hosts_to_upgrade
+ tasks:
+ - include: roles/etcd/tasks/etcdctl.yml
+
+- name: Perform etcd upgrade
+ include: ./upgrade.yml
+ when: openshift_etcd_upgrade | default(true) | bool
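
To check what the two groups evaluate to for a given inventory, a throwaway play can be appended; a minimal sketch (output shape is illustrative):

    - hosts: localhost
      connection: local
      become: no
      tasks:
        - debug:
            msg: "upgrade={{ groups['etcd_hosts_to_upgrade'] | default([]) }} backup={{ groups['etcd_hosts_to_backup'] | default([]) }}"
          # external etcd: both groups list the etcd hosts;
          # embedded etcd: upgrade is empty, backup holds the first master
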
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
new file mode 100644
index 000000000..3a972e8ab
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
@@ -0,0 +1,20 @@
+---
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Update etcd RPM
+ package:
+ name: etcd-{{ upgrade_version }}*
+ state: latest
+
+- name: Restart etcd
+ service:
+ name: etcd
+ state: restarted
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
new file mode 100644
index 000000000..0f8d94737
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -0,0 +1,94 @@
+---
+- name: Determine etcd version
+ hosts: etcd_hosts_to_upgrade
+ tasks:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ args:
+ warn: no
+ register: etcd_rpm_version
+ failed_when: false
+ when: not openshift.common.is_containerized | bool
+ - name: Record containerized etcd version
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+# I really dislike this copy/paste, but I wasn't able to find a way to loop
+# through hosts and then loop through tasks only when appropriate
+- name: Upgrade to 2.1
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.1'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 2.2
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.2'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to 2.2.5
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 2.2.5
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 2.3
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.3'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to 2.3.7
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 2.3.7
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 3.0
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '3.0'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to etcd3 image
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 3.0.15
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade fedora to latest
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ tasks:
+ - include: fedora_tasks.yml
+ when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool
+
+- name: Backup etcd
+ include: backup.yml
+ vars:
+ backup_tag: "post-3.0-"
+ when: openshift_etcd_backup | default(true) | bool
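
The ladder above means each host only runs the plays whose target version it hasn't reached yet. A sketch of how the gating evaluates, assuming a hypothetical starting RPM version of 2.0.13:

    - hosts: localhost
      connection: local
      vars:
        start_version: '2.0.13'  # hypothetical
      tasks:
        - debug:
            msg: "upgrade to {{ item }} needed: {{ start_version | version_compare(item, '<') }}"
          with_items: ['2.1', '2.2', '2.3', '3.0']
          # all four are True here; a host already at 2.3.7 would only hit the 3.0 play
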
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
deleted file mode 100644
index e5c958ebb..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env python
-"""
-Pre-upgrade checks that must be run on a master before proceeding with upgrade.
-"""
-# This is a script not a python module:
-# pylint: disable=invalid-name
-
-# NOTE: This script should not require any python libs other than what is
-# in the standard library.
-
-__license__ = "ASL 2.0"
-
-import json
-import os
-import subprocess
-import re
-
-# The maximum length of container.ports.name
-ALLOWED_LENGTH = 15
-# The valid structure of container.ports.name
-ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$')
-AT_LEAST_ONE_LETTER = re.compile('[a-z]')
-# look at OC_PATH for the full path. Default to 'oc'
-OC_PATH = os.getenv('OC_PATH', 'oc')
-
-
-def validate(value):
- """
- validate verifies that value matches required conventions
-
- Rules of container.ports.name validation:
-
- * must be less than 16 chars
- * at least one letter
- * only a-z0-9-
- * hyphens can not be leading or trailing or next to each other
-
- :Parameters:
- - `value`: Value to validate
- """
- if len(value) > ALLOWED_LENGTH:
- return False
-
- if '--' in value:
- return False
-
- # We search since it can be anywhere
- if not AT_LEAST_ONE_LETTER.search(value):
- return False
-
- # We match because it must start at the beginning
- if not ALLOWED_CHARS.match(value):
- return False
- return True
-
-
-def list_items(kind):
- """
- list_items returns a list of items from the api
-
- :Parameters:
- - `kind`: Kind of item to access
- """
- response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind])
- items = json.loads(response)
- return items.get("items", [])
-
-
-def get(obj, *paths):
- """
- Gets an object
-
- :Parameters:
- - `obj`: A dictionary structure
- - `path`: All other non-keyword arguments
- """
- ret_obj = obj
- for path in paths:
- if ret_obj.get(path, None) is None:
- return []
- ret_obj = ret_obj[path]
- return ret_obj
-
-
-# pylint: disable=too-many-arguments
-def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid):
- """
- Prints out results in human friendly way.
-
- :Parameters:
- - `namespace`: Namespace of the resource
- - `kind`: Kind of the resource
- - `item_name`: Name of the resource
- - `container_name`: Name of the container. May be "" when kind=Service.
- - `port_name`: Name of the port
- - `invalid_label`: The label of the invalid port. Port.name/targetPort
- - `valid`: True if the port is valid
- """
- if not valid:
- if len(container_name) > 0:
- print('%s/%s -n %s (Container="%s" %s="%s")' % (
- kind, item_name, namespace, container_name, invalid_label, port_name))
- else:
- print('%s/%s -n %s (%s="%s")' % (
- kind, item_name, namespace, invalid_label, port_name))
-
-
-def print_validation_header():
- """
- Prints the error header. Should run on the first error to avoid
- overwhelming the user.
- """
- print """\
-At least one port name is invalid and must be corrected before upgrading.
-Please update or remove any resources with invalid port names.
-
- Valid port names must:
-
- * be less than 16 characters
- * have at least one letter
- * contain only a-z0-9-
- * not start or end with -
- * not contain dashes next to each other ('--')
-"""
-
-
-def main():
- """
- main is the main entry point to this script
- """
- try:
- # the comma at the end suppresses the newline
- print "Checking for oc ...",
- subprocess.check_output([OC_PATH, 'whoami'])
- print "found"
- except:
- print(
- 'Unable to run "%s whoami"\n'
- 'Please ensure OpenShift is running, and "oc" is on your system '
- 'path.\n'
- 'You can override the path with the OC_PATH environment variable.'
- % OC_PATH)
- raise SystemExit(1)
-
- # Where the magic happens
- first_error = True
- for kind, path in [
- ('deploymentconfigs', ("spec", "template", "spec", "containers")),
- ('replicationcontrollers', ("spec", "template", "spec", "containers")),
- ('pods', ("spec", "containers"))]:
- for item in list_items(kind):
- namespace = item["metadata"]["namespace"]
- item_name = item["metadata"]["name"]
- for container in get(item, *path):
- container_name = container["name"]
- for port in get(container, "ports"):
- port_name = port.get("name", None)
- if not port_name:
- # Unnamed ports are OK
- continue
- valid = validate(port_name)
- if not valid and first_error:
- first_error = False
- print_validation_header()
- pretty_print_errors(
- namespace, kind, item_name,
- container_name, "Port.name", port_name, valid)
-
- # Services follow a different flow
- for item in list_items('services'):
- namespace = item["metadata"]["namespace"]
- item_name = item["metadata"]["name"]
- for port in get(item, "spec", "ports"):
- port_name = port.get("targetPort", None)
- if isinstance(port_name, int) or port_name is None:
- # Integer only or unnamed ports are OK
- continue
- valid = validate(port_name)
- if not valid and first_error:
- first_error = False
- print_validation_header()
- pretty_print_errors(
- namespace, "services", item_name, "",
- "targetPort", port_name, valid)
-
- # If we had at least 1 error then exit with 1
- if not first_error:
- raise SystemExit(1)
-
-
-if __name__ == '__main__':
- main()
-
diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
deleted file mode 100644
index 7bf249742..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-if [ `which dnf 2> /dev/null` ]; then
- installed=$(dnf repoquery --installed --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
- available=$(dnf repoquery --available --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
-else
- installed=$(repoquery --plugins --pkgnarrow=installed --qf '%{version}-%{release}' "${@}" 2> /dev/null)
- available=$(repoquery --plugins --pkgnarrow=available --qf '%{version}-%{release}' "${@}" 2> /dev/null)
-fi
-
-echo "---"
-echo "curr_version: ${installed}"
-echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index fbdb7900a..8cac2fb3b 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,6 +1,4 @@
---
-- include: ../verify_ansible_version.yml
-
- hosts: localhost
connection: local
become: no
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index 9a065fd1c..673f11889 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -17,6 +17,7 @@ requirements: [ ]
EXAMPLES = '''
'''
+
def modify_api_levels(level_list, remove, ensure, msg_prepend='',
msg_append=''):
""" modify_api_levels """
@@ -62,7 +63,6 @@ def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
config = yaml.safe_load(master_cfg_file.read())
master_cfg_file.close()
-
# Remove unsupported api versions and ensure supported api versions from
# master config
unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
@@ -118,7 +118,7 @@ def main():
# redefined-outer-name
global module
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
config_base=dict(required=True),
from_version=dict(required=True, choices=['3.0']),
@@ -146,13 +146,14 @@ def main():
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
- except Exception, e:
+ except Exception as e:
return module.fail_json(msg=str(e))
+
# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
# import module snippets
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import * # noqa: E402,F403
if __name__ == '__main__':
main()
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index cd1139b29..df2b664d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -1,11 +1,8 @@
+---
# We verified latest rpm available is suitable, so just yum update.
- name: Upgrade packages
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+ package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
- name: Ensure python-yaml present for config upgrade
- action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ package: name=PyYAML state=present
when: not openshift.common.is_atomic | bool
-
-- name: Restart node service
- service: name="{{ openshift.common.service_type }}-node" state=restarted
- when: component == "node"
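
For reference, with openshift.common.service_type of atomic-openshift, component of node, and a hypothetical openshift_pkg_version of "-3.4.0.39", the package task above expands to:

    - package: name=atomic-openshift-node-3.4.0.39 state=present
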
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 6b567e2e2..6950b6166 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -24,88 +24,14 @@
- openshift_facts:
role: master
local_facts:
- embedded_etcd: "{{ groups.oo_etcd_to_config | length == 0 }}"
+ embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
-- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- roles:
- - openshift_facts
- tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- - openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
- # TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
- register: avail_disk
-
- # TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
- - name: Install etcd (for etcdctl)
- action: "{{ ansible_pkg_mgr }} name=etcd state=installed"
- when: not openshift.common.is_atomic | bool
-
- - name: Generate etcd backup
- command: >
- etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when: etcd_backup_failed | length > 0
+- name: Upgrade and backup etcd
+ include: ./etcd/main.yml
- name: Upgrade master packages
hosts: oo_masters_to_config
- handlers:
- - include: ../../../../roles/openshift_master/handlers/main.yml
- static: yes
roles:
- openshift_facts
tasks:
@@ -125,6 +51,14 @@
- include: create_service_signer_cert.yml
+# Set openshift_master_facts separately. In order to reconcile
+# admission configs, we currently must run openshift_master_facts and
+# then run openshift_facts.
+- name: Set OpenShift master facts
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_master_facts
+
- name: Upgrade master config and systemd units
hosts: oo_masters_to_config
handlers:
@@ -132,7 +66,11 @@
static: yes
roles:
- openshift_facts
- tasks:
+ post_tasks:
+ - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
+
+ - include: upgrade_scheduler.yml
+
- include: "{{ master_config_hook }}"
when: master_config_hook is defined
@@ -228,6 +166,12 @@
when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
run_once: true
+ - name: Reconcile Jenkins Pipeline Role Bindings
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm
+ run_once: true
+ when: openshift.common.version_gte_3_4_or_1_4 | bool
+
- name: Reconcile Security Context Constraints
command: >
{{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 1f314c854..86b344d7a 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,5 +1,5 @@
---
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
@@ -17,7 +17,7 @@
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Determine if node is currently schedulable
command: >
- {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
register: node_output
delegate_to: "{{ groups.oo_first_master.0 }}"
changed_when: false
@@ -29,7 +29,7 @@
- name: Mark unschedulable if host is a node
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
# NOTE: There is a transient "object has been modified" error here, allow a couple
@@ -39,13 +39,18 @@
retries: 3
delay: 1
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
+
tasks:
+
- include: docker/upgrade.yml
+ vars:
+ # We will restart Docker ourselves after everything is ready:
+ skip_docker_restart: True
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- include: "{{ node_config_hook }}"
@@ -53,23 +58,41 @@
- include: rpm_upgrade.yml
vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
+ - name: Remove obsolete docker-sdn-ovs.conf
+ file: path=/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf state=absent
+ when: (deployment_type == 'openshift-enterprise' and openshift_release | version_compare('3.4', '>=')) or (deployment_type == 'origin' and openshift_release | version_compare('1.4', '>='))
+
- include: containerized_node_upgrade.yml
when: inventory_hostname in groups.oo_nodes_to_upgrade and openshift.common.is_containerized | bool
- - meta: flush_handlers
+ - name: Ensure containerized services stopped before Docker restart
+ service: name={{ item }} state=stopped
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+ # Mandatory Docker restart, ensure all containerized services are running:
+ - include: docker/restart.yml
+
+ - name: Restart rpm node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
- name: Set node schedulability
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
register: node_sched
until: node_sched.rc == 0
retries: 3
delay: 1
-
-
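
The was_schedulable flag referenced above is derived from node_output in context lines not shown in this diff; a minimal sketch of that parsing, assuming the standard node JSON shape:

    - set_fact:
        was_schedulable: "{{ not ((node_output.stdout | from_json).spec.unschedulable | default(false)) }}"
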
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
new file mode 100644
index 000000000..88f2ddc78
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -0,0 +1,166 @@
+---
+# Upgrade predicates
+- vars:
+ prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
+ prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
+ default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
+ # older_predicates are the set of predicates that have previously been
+ # hard-coded into openshift_facts
+ older_predicates:
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - name: MaxEBSVolumeCount
+ - name: MaxGCEPDVolumeCount
+ - name: Region
+ argument:
+ serviceAffinity:
+ labels:
+ - region
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - name: Region
+ argument:
+ serviceAffinity:
+ labels:
+ - region
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: Region
+ argument:
+ serviceAffinity:
+ labels:
+ - region
+ # older_predicates_no_region are the set of predicates that have previously
+ # been hard-coded into openshift_facts, with the Region predicate removed
+ older_predicates_no_region:
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - name: MaxEBSVolumeCount
+ - name: MaxGCEPDVolumeCount
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ block:
+
+ # Handle the case where openshift_master_scheduler_predicates is defined
+ - block:
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}"
+
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}"
+
+ # Handle cases where openshift_master_scheduler_predicates is not defined
+ - block:
+ - debug:
+ msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
+ openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
+ openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
+ when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
+ openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
+
+ when: "{{ openshift_master_scheduler_predicates | default(none) is none }}"
+
+
+# Upgrade priorities
+- vars:
+ prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
+ prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}"
+ default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}"
+ # older_priorities are the set of priorities that have previously been
+ # hard-coded into openshift_facts
+ older_priorities:
+ - - name: LeastRequestedPriority
+ weight: 1
+ - name: SelectorSpreadPriority
+ weight: 1
+ - name: Zone
+ weight: 2
+ argument:
+ serviceAntiAffinity:
+ label: zone
+ # older_priorities_no_zone are the set of priorities that have previously
+ # been hard-coded into openshift_facts, with the Zone priority removed
+ older_priorities_no_zone:
+ - - name: LeastRequestedPriority
+ weight: 1
+ - name: SelectorSpreadPriority
+ weight: 1
+ block:
+
+ # Handle the case where openshift_master_scheduler_priorities is defined
+ - block:
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}"
+
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}"
+
+ # Handle cases where openshift_master_scheduler_priorities is not defined
+ - block:
+ - debug:
+ msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
+ openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
+ openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
+ when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
+ openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
+
+ when: "{{ openshift_master_scheduler_priorities | default(none) is none }}"
+
+
+# Update scheduler
+- vars:
+ scheduler_config:
+ kind: Policy
+ apiVersion: v1
+ predicates: "{{ openshift_upgrade_scheduler_predicates
+ | default(openshift_master_scheduler_current_predicates) }}"
+ priorities: "{{ openshift_upgrade_scheduler_priorities
+ | default(openshift_master_scheduler_current_priorities) }}"
+ block:
+ - name: Update scheduler config
+ copy:
+ content: "{{ scheduler_config | to_nice_json }}"
+ dest: "{{ openshift_master_scheduler_conf }}"
+ backup: true
+ when: "{{ openshift_upgrade_scheduler_predicates is defined or
+ openshift_upgrade_scheduler_priorities is defined }}"
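
When one of the set_fact tasks fires, the copy task renders scheduler_config with to_nice_json; an abridged, illustrative example of the file written to openshift_master_scheduler_conf:

    {
        "apiVersion": "v1",
        "kind": "Policy",
        "predicates": [
            {"name": "MatchNodeSelector"},
            {"name": "PodFitsResources"}
        ],
        "priorities": [
            {"name": "LeastRequestedPriority", "weight": 1}
        ]
    }
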
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh
deleted file mode 120000
index 49a51bba9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh
+++ /dev/null
@@ -1 +0,0 @@
-../files/nuke_images.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index 684eea343..68c71a132 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -48,3 +48,19 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'
yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "{{ 'admission_plugin_config' in openshift.master }}"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
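
Passing an empty yaml_value to modify_yaml clears the key. A before/after sketch of the relevant master-config.yaml fragment (contents hypothetical; the exact rendering of the cleared key depends on modify_yaml):

    # before
    admissionConfig:
      pluginOrderOverride:
      - OriginPodNodeEnvironment
    # after
    admissionConfig:
      pluginOrderOverride: null
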
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
index 8f64636ae..89b524f14 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
@@ -18,4 +18,3 @@
dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
yaml_key: 'masterClientConnectionOverrides.qps'
yaml_value: 20
-
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
new file mode 100644
index 000000000..43c2ffcd4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "{{ 'admission_plugin_config' in openshift.master }}"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value: