Diffstat (limited to 'playbooks/common/openshift-cluster')
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml | 44
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 51
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh | 23
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh | 22
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 114
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 646
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/library | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml | 58
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml | 88
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 140
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_node_upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml) | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml | 24
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.dep.service | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.service | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.docker.service | 1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.sysconfig.j2 | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml | 138
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml | 112
35 files changed, 269 insertions, 1263 deletions
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 5fec11541..5cf5df08e 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -5,6 +5,8 @@
- include: validate_hostnames.yml
+- include: initialize_openshift_version.yml
+
- name: Set oo_options
hosts: oo_all_hosts
tasks:
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 37f523246..04dde632b 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -9,3 +9,5 @@
role: common
local_facts:
hostname: "{{ openshift_hostname | default(None) }}"
+ - set_fact:
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
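
The added fact copies the first master's portal_net (the cluster's service network CIDR) onto every host, presumably so the Docker configuration can allow the in-cluster registry's network (e.g. as an --insecure-registry entry). A minimal sketch of the resolved fact, assuming the stock portal_net default:

    # Illustrative resolved value, assuming the default portal_net of 172.30.0.0/16:
    - set_fact:
        openshift_docker_hosted_registry_network: "172.30.0.0/16"
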
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
new file mode 100644
index 000000000..972df050c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -0,0 +1,29 @@
+---
+# NOTE: requires openshift_facts to be run
+- name: Determine openshift_version to configure on first master
+ hosts: oo_first_master
+ roles:
+ - openshift_version
+ pre_tasks:
+ - debug: var=openshift_version
+ post_tasks:
+ - debug: var=openshift_version
+
+# NOTE: We set this even on etcd hosts as they may also later run as masters,
+# and we don't want to install the wrong version of Docker and have to downgrade
+# later.
+- name: Set openshift_version for all hosts
+ hosts: oo_all_hosts:!oo_first_master
+ vars:
+ openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+ roles:
+ - openshift_version
+ pre_tasks:
+ - debug: var=hostvars[groups.oo_first_master.0].openshift_version
+ - debug: var=openshift.common.version
+ - debug: var=openshift_version
+ post_tasks:
+ - debug: var=hostvars[groups.oo_first_master.0].openshift_version
+ - debug: var=openshift.common.version
+ - debug: var=openshift_version
+
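
These two plays compute openshift_version once on the first master and then pin the result on every other host, so mixed-version installs cannot happen by accident. Setting openshift_version in the inventory should short-circuit the detection; a sketch, assuming the openshift_version role honors a pre-existing value:

    # group_vars/OSEv3.yml (hypothetical pin, illustrative value):
    openshift_version: "3.2.0.20"
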
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
new file mode 100644
index 000000000..20d66522f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -0,0 +1,44 @@
+---
+# We need the docker service up to remove all the images, but these services will
+# keep trying to restart and thus re-pull the images we're trying to delete.
+- name: Stop containerized services
+ service: name={{ item }} state=stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ - etcd_container
+ - openvswitch
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- name: Remove all containers and images
+ script: nuke_images.sh docker
+ register: nuke_images_result
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Upgrade Docker
+ action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version }} state=present"
+
+- name: Restart containerized services
+ service: name={{ item }} state=started
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
new file mode 100644
index 000000000..06b3e244f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -0,0 +1,51 @@
+---
+
+# This snippet determines whether a Docker upgrade is required by checking the
+# inventory variables and the available packages, and sets l_docker_upgrade to True if so.
+
+- set_fact:
+ docker_upgrade: True
+ when: docker_upgrade is not defined
+
+- name: Check if Docker is installed
+ command: rpm -q docker
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Get current version of Docker
+ command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
+ register: curr_docker_version
+ changed_when: false
+
+- name: Get latest available version of Docker
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "docker"
+ register: avail_docker_version
+ failed_when: false
+ changed_when: false
+
+- fail:
+ msg: This playbook requires access to Docker 1.10 or later
+ # Disable the 1.10 requirement if the user set a specific Docker version
+ when: avail_docker_version.stdout | version_compare('1.10','<') and docker_version is not defined
+
+# Default l_docker_upgrade to False; we'll set it to True if an upgrade is required:
+- set_fact:
+ l_docker_upgrade: False
+
+# Make sure a docker_version is set if none was requested:
+- set_fact:
+ docker_version: "{{ avail_docker_version.stdout }}"
+ when: docker_version is not defined
+
+- name: Flag for Docker upgrade if necessary
+ set_fact:
+ l_docker_upgrade: True
+ when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
+
+- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
+ set_fact:
+ docker_upgrade_nuke_images: True
+ when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
+
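
The two new docker files form a check-then-act pair: upgrade_check.yml computes docker_version, l_docker_upgrade, and (when crossing the 1.10 boundary) docker_upgrade_nuke_images, while upgrade.yml consumes them. A hypothetical driver play (not part of this commit) would include them in order:

    - name: Upgrade Docker where required (hypothetical driver play)
      hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
      roles:
        - openshift_facts
      tasks:
        - include: docker/upgrade_check.yml
        - include: docker/upgrade.yml
          when: l_docker_upgrade is defined and l_docker_upgrade | bool
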
diff --git a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh
new file mode 100644
index 000000000..6b155f7fa
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Stop any running containers
+running_container_ids=`docker ps -q`
+if test -n "$running_container_ids"
+then
+ docker stop $running_container_ids
+fi
+
+# Delete all containers
+container_ids=`docker ps -a -q`
+if test -n "$container_ids"
+then
+ docker rm -f -v $container_ids
+fi
+
+# Delete all images (forcefully)
+image_ids=`docker images -q`
+if test -n "$image_ids"
+then
+ # Taken from: https://gist.github.com/brianclements/f72b2de8e307c7b56689#gistcomment-1443144
+ docker rmi $(docker images | grep "$2/\|/$2 \| $2 \|$2 \|$2-\|$2_" | awk '{print $1 ":" $2}') 2>/dev/null || echo "No images matching \"$2\" left to purge."
+fi
diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
deleted file mode 100644
index 9bbeff660..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# Here we don't really care if this is a master, api, controller or node image.
-# We just need to know the version of one of them.
-unit_file=$(ls /etc/systemd/system/${1}*.service | grep -v node-dep | head -n1)
-
-if [ ${1} == "origin" ]; then
- image_name="openshift/origin"
-elif grep aep $unit_file 2>&1 > /dev/null; then
- image_name="aep3/node"
-elif grep openshift3 $unit_file 2>&1 > /dev/null; then
- image_name="openshift3/node"
-fi
-
-installed=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
-
-docker pull ${image_name} 2>&1 > /dev/null
-available=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
-
-echo "---"
-echo "curr_version: ${installed}"
-echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
deleted file mode 120000
index 53bed9684..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
+++ /dev/null
@@ -1 +0,0 @@
-../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
deleted file mode 100644
index e31e7f8a3..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+++ /dev/null
@@ -1,114 +0,0 @@
----
-- name: Evaluate groups
- include: ../../evaluate_groups.yml
-
-- name: Re-Run cluster configuration to apply latest configuration changes
- include: ../../config.yml
-
-- name: Upgrade masters
- hosts: oo_masters_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- tasks:
- - name: Upgrade master packages
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest"
- - name: Restart master services
- service: name="{{ openshift.common.service_type}}-master" state=restarted
-
-- name: Upgrade nodes
- hosts: oo_nodes_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- tasks:
- - name: Upgrade node packages
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest"
- - name: Restart node services
- service: name="{{ openshift.common.service_type }}-node" state=restarted
-
-- name: Determine new master version
- hosts: oo_first_master
- tasks:
- - name: Determine new version
- command: >
- rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
- register: _new_version
-
-- name: Ensure AOS 3.0.2 or Origin 1.0.6
- hosts: oo_first_master
- tasks:
- - fail:
- msg: "This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"
- when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
-
-- name: Update cluster policy
- hosts: oo_first_master
- tasks:
- - name: oadm policy reconcile-cluster-roles --additive-only=true --confirm
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --additive-only=true --confirm
-
-- name: Upgrade default router
- hosts: oo_first_master
- vars:
- - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
- - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
- tasks:
- - name: Check for default router
- command: >
- {{ oc_cmd }} get -n default dc/router
- register: _default_router
- failed_when: false
- changed_when: false
- - name: Check for allowHostNetwork and allowHostPorts
- when: _default_router.rc == 0
- shell: >
- {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
- register: _scc
- - name: Grant allowHostNetwork and allowHostPorts
- when:
- - _default_router.rc == 0
- - "'false' in _scc.stdout"
- command: >
- {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
- - name: Update deployment config to 1.0.4/3.0.1 spec
- when: _default_router.rc == 0
- command: >
- {{ oc_cmd }} patch dc/router -p
- '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
- - name: Switch to hostNetwork=true
- when: _default_router.rc == 0
- command: >
- {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
- - name: Update router image to current version
- when: _default_router.rc == 0
- command: >
- {{ oc_cmd }} patch dc/router -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
-
-- name: Upgrade default
- hosts: oo_first_master
- vars:
- - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
- - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
- tasks:
- - name: Check for default registry
- command: >
- {{ oc_cmd }} get -n default dc/docker-registry
- register: _default_registry
- failed_when: false
- changed_when: false
- - name: Update registry image to current version
- when: _default_registry.rc == 0
- command: >
- {{ oc_cmd }} patch dc/docker-registry -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
-
-- name: Update image streams and templates
- hosts: oo_first_master
- vars:
- openshift_examples_import_command: "update"
- openshift_deployment_type: "{{ deployment_type }}"
- registry_url: "{{ openshift.master.registry_url }}"
- roles:
- - openshift_examples
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
deleted file mode 120000
index 53bed9684..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
+++ /dev/null
@@ -1 +0,0 @@
-../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
deleted file mode 100644
index c3c1240d8..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ /dev/null
@@ -1,646 +0,0 @@
----
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-- name: Evaluate host groups
- include: ../../evaluate_groups.yml
-
-- name: Load openshift_facts
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_facts
-
-- name: Evaluate additional groups for upgrade
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
-
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed
- hosts: oo_first_master
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
- gather_facts: no
- tasks:
- # Pacemaker is currently the only supported upgrade path for multiple masters
- - fail:
- msg: "openshift_master_cluster_method must be set to 'pacemaker'"
- when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method != "pacemaker"))
-
- - fail:
- msg: >
- This upgrade is only supported for origin, openshift-enterprise, and online
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise', 'online']
-
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ target_version }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
-
- # If this script errors out ansible will show the default stdout/stderr
- # which contains details for the user:
- - script: ../files/pre-upgrade-check
-
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
- tasks:
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
-
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
-
- - name: Determine available versions
- script: ../files/rpm_versions.sh {{ g_new_service_name }} openshift
- register: g_versions_result
-
- - set_fact:
- g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
-
- - set_fact:
- g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
- when: openshift_pkg_version is not defined
-
- - set_fact:
- g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
- when: openshift_pkg_version is defined
-
- - fail:
- msg: This playbook requires Origin 1.0.6 or later
- when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<')
-
- - fail:
- msg: Upgrade packages not found
- when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
-
- - set_fact:
- pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
- hosts: localhost
- connection: local
- become: no
- vars:
- pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
- tasks:
- - set_fact:
- pre_upgrade_completed: "{{ hostvars
- | oo_select_keys(pre_upgrade_hosts)
- | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
- - set_fact:
- pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
- when: pre_upgrade_failed | length > 0
-
-
-
-###############################################################################
-# Backup etcd
-###############################################################################
-- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ openshift.master.embedded_etcd }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- roles:
- - openshift_facts
- tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- - openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
- # TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
- register: avail_disk
-
- # TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
- - name: Install etcd (for etcdctl)
- action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
-
- - name: Generate etcd backup
- command: >
- etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-##############################################################################
-# Gate on etcd backup
-##############################################################################
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when: etcd_backup_failed | length > 0
-
-
-
-###############################################################################
-# Upgrade Masters
-###############################################################################
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_master_mktemp
- changed_when: False
-
-- name: Update deployment type
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
- roles:
- - openshift_facts
-
-- name: Update master facts
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- post_tasks:
- - openshift_facts:
- role: master
- local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
-
-- name: Upgrade master packages and configuration
- hosts: oo_masters_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- roles:
- - openshift_facts
- tasks:
- - name: Upgrade to latest available kernel
- action: "{{ ansible_pkg_mgr}} name=kernel state=latest"
-
- - name: Upgrade master packages
- command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
- when: openshift_pkg_version is not defined
-
- - name: Upgrade packages
- command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}"
- when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise'
-
- - name: Ensure python-yaml present for config upgrade
- action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
- when: not openshift.common.is_atomic | bool
-
- - name: Upgrade master configuration
- openshift_upgrade_config:
- from_version: '3.0'
- to_version: '3.1'
- role: master
- config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
-
- - set_fact:
- openshift_master_certs_no_etcd:
- - admin.crt
- - master.kubelet-client.crt
- - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
- - master.server.crt
- - openshift-master.crt
- - openshift-registry.crt
- - openshift-router.crt
- - etcd.server.crt
- openshift_master_certs_etcd:
- - master.etcd-client.crt
-
- - set_fact:
- openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
-
- - name: Check status of master certificates
- stat:
- path: "{{ openshift.common.config_base }}/master/{{ item }}"
- with_items: openshift_master_certs
- register: g_master_cert_stat_result
-
- - set_fact:
- master_certs_missing: "{{ False in (g_master_cert_stat_result.results
- | oo_collect(attribute='stat.exists')
- | list ) }}"
- master_cert_subdir: master-{{ openshift.common.hostname }}
- master_cert_config_dir: "{{ openshift.common.config_base }}/master"
-
-
-- name: Generate missing master certificates
- hosts: oo_first_master
- vars:
- master_hostnames: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
- master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
- masters_needing_certs: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
- | oo_filter_list(filter_attr='master_certs_missing') }}"
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- openshift_deployment_type: "{{ deployment_type }}"
- roles:
- - openshift_master_certificates
- post_tasks:
- - name: Remove generated etcd client certs when using external etcd
- file:
- path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
- state: absent
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- with_nested:
- - masters_needing_certs
- - - master.etcd-client.crt
- - master.etcd-client.key
-
- - name: Create a tarball of the master certs
- command: >
- tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
- -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
- with_items: masters_needing_certs
-
- - name: Retrieve the master cert tarball from the master
- fetch:
- src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: masters_needing_certs
-
-
-- name: Sync generated certs, update service config and restart master services
- hosts: oo_masters_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- openshift_deployment_type: "{{ deployment_type }}"
- tasks:
- - name: Unarchive the tarball on the master
- unarchive:
- src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
- dest: "{{ master_cert_config_dir }}"
- when: inventory_hostname != groups.oo_first_master.0
-
- - name: Restart master service
- service: name="{{ openshift.common.service_type}}-master" state=restarted
- when: not openshift_master_ha | bool
-
- - name: Ensure the master service is enabled
- service: name="{{ openshift.common.service_type}}-master" state=started enabled=yes
- when: not openshift_master_ha | bool
-
- - name: Check for configured cluster
- stat:
- path: /etc/corosync/corosync.conf
- register: corosync_conf
- when: openshift_master_ha | bool
-
- - name: Destroy cluster
- command: pcs cluster destroy --all
- when: openshift_master_ha | bool and corosync_conf.stat.exists == true
- run_once: true
-
- - name: Start pcsd
- service: name=pcsd enabled=yes state=started
- when: openshift_master_ha | bool
-
-
-- name: Re-create cluster
- hosts: oo_first_master
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- openshift_deployment_type: "{{ deployment_type }}"
- omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ') }}"
- roles:
- - role: openshift_master_cluster
- when: openshift_master_ha | bool
-
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file: name={{ g_master_mktemp.stdout }} state=absent
- changed_when: False
-
-
-- name: Set master update status to complete
- hosts: oo_masters_to_config
- tasks:
- - set_fact:
- master_update_complete: True
-
-
-##############################################################################
-# Gate on master update complete
-##############################################################################
-- name: Gate on master update
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- master_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
- - set_fact:
- master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
- when: master_update_failed | length > 0
-
-
-###############################################################################
-# Upgrade Nodes
-###############################################################################
-- name: Upgrade nodes
- hosts: oo_nodes_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- roles:
- - openshift_facts
- tasks:
- - name: Upgrade node packages
- command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
- when: openshift_pkg_version is not defined
-
- - name: Upgrade packages
- command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}"
- when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise'
-
- - name: Restart node service
- service: name="{{ openshift.common.service_type }}-node" state=restarted
-
- - name: Ensure node service enabled
- service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
-
- - name: Install Ceph storage plugin dependencies
- action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
-
- - name: Install GlusterFS storage plugin dependencies
- action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present"
-
- - name: Set sebooleans to allow gluster storage plugin access from containers
- seboolean:
- name: "{{ item }}"
- state: yes
- persistent: yes
- when: ansible_selinux and ansible_selinux.status == "enabled"
- with_items:
- - virt_use_fusefs
- - virt_sandbox_use_fusefs
- register: sebool_result
- failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg"
-
- - set_fact:
- node_update_complete: True
-
-
-##############################################################################
-# Gate on nodes update
-##############################################################################
-- name: Gate on nodes update
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- node_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_nodes_to_config)
- | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
- - set_fact:
- node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
- when: node_update_failed | length > 0
-
-
-###############################################################################
-# Post upgrade - Reconcile Cluster Roles and Cluster Role Bindings
-###############################################################################
-- name: Reconcile Cluster Roles and Cluster Role Bindings
- hosts: oo_masters_to_config
- vars:
- origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
- ent_reconcile_bindings: true
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- tasks:
- - name: Reconcile Cluster Roles
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --additive-only=true --confirm
- run_once: true
-
- - name: Reconcile Cluster Role Bindings
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-role-bindings
- --exclude-groups=system:authenticated
- --exclude-groups=system:authenticated:oauth
- --exclude-groups=system:unauthenticated
- --exclude-users=system:anonymous
- --additive-only=true --confirm
- when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
- run_once: true
-
- - name: Restart master services
- service: name="{{ openshift.common.service_type}}-master" state=restarted
- when: not openshift_master_ha | bool
-
- - name: Restart master cluster
- command: pcs resource restart master
- when: openshift_master_ha | bool
- run_once: true
-
- - name: Wait for the clustered master service to be available
- wait_for:
- host: "{{ openshift_master_cluster_vip }}"
- port: 8443
- state: started
- timeout: 180
- delay: 90
- when: openshift_master_ha | bool
- run_once: true
-
- - set_fact:
- reconcile_complete: True
-
-
-##############################################################################
-# Gate on reconcile
-##############################################################################
-- name: Gate on reconcile
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- reconcile_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
- - set_fact:
- reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
- when: reconcile_failed | length > 0
-
-
-
-
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
-- name: Upgrade default router and default registry
- hosts: oo_first_master
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
- registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
- router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
- oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
- roles:
- # Create the new templates shipped in 3.1, existing templates are left
- # unmodified. This prevents the subsequent role definition for
- # openshift_examples from failing when trying to replace templates that do
- # not already exist. We could have potentially done a replace --force to
- # create and update in one step.
- - openshift_examples
- # Update the existing templates
- - role: openshift_examples
- openshift_examples_import_command: replace
- registry_url: "{{ openshift.master.registry_url }}"
- pre_tasks:
- - name: Collect all routers
- command: >
- {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
- register: all_routers
- failed_when: false
- changed_when: false
-
- - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
- when: all_routers.rc == 0
-
- - set_fact: haproxy_routers=[]
- when: all_routers.rc != 0
-
- - name: Check for allowHostNetwork and allowHostPorts
- when: all_routers.rc == 0
- shell: >
- {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
- register: _scc
-
- - name: Grant allowHostNetwork and allowHostPorts
- when:
- - all_routers.rc == 0
- - "'false' in _scc.stdout"
- command: >
- {{ oc_cmd }} patch scc/privileged -p
- '{"allowHostPorts":true,"allowHostNetwork":true}' --api-version=v1
-
- - name: Update deployment config to 1.0.4/3.0.1 spec
- when: all_routers.rc == 0
- command: >
- {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
- '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
- --api-version=v1
- with_items: haproxy_routers
-
- - name: Switch to hostNetwork=true
- when: all_routers.rc == 0
- command: >
- {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
- --api-version=v1
- with_items: haproxy_routers
-
- - name: Update router image to current version
- when: all_routers.rc == 0
- command: >
- {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
- --api-version=v1
- with_items: haproxy_routers
- when: not openshift.common.version_gte_3_1_1_or_1_1_1
-
- - name: Update router image to current version
- when: all_routers.rc == 0
- command: >
- {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
- --api-version=v1
- with_items: haproxy_routers
- when: openshift.common.version_gte_3_1_1_or_1_1_1
-
- - name: Check for default registry
- command: >
- {{ oc_cmd }} get -n default dc/docker-registry
- register: _default_registry
- failed_when: false
- changed_when: false
-
- - name: Update registry image to current version
- when: _default_registry.rc == 0
- command: >
- {{ oc_cmd }} patch dc/docker-registry -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
- --api-version=v1
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
deleted file mode 120000
index 53bed9684..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
+++ /dev/null
@@ -1 +0,0 @@
-../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
deleted file mode 100644
index f030eed18..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
-- name: Upgrade default router and default registry
- hosts: oo_first_master
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
- registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
- router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
- oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
- roles:
- # Create the new templates shipped in 3.1.z, existing templates are left
- # unmodified. This prevents the subsequent role definition for
- # openshift_examples from failing when trying to replace templates that do
- # not already exist. We could have potentially done a replace --force to
- # create and update in one step.
- - openshift_examples
- # Update the existing templates
- - role: openshift_examples
- openshift_examples_import_command: replace
- registry_url: "{{ openshift.master.registry_url }}"
- pre_tasks:
- - name: Collect all routers
- command: >
- {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
- register: all_routers
- failed_when: false
- changed_when: false
-
- - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
- when: all_routers.rc == 0
-
- - set_fact: haproxy_routers=[]
- when: all_routers.rc != 0
-
- - name: Update router image to current version
- when: all_routers.rc == 0
- command: >
- {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
- --api-version=v1
- with_items: haproxy_routers
-
- - name: Check for default registry
- command: >
- {{ oc_cmd }} get -n default dc/docker-registry
- register: _default_registry
- failed_when: false
- changed_when: false
-
- - name: Update registry image to current version
- when: _default_registry.rc == 0
- command: >
- {{ oc_cmd }} patch dc/docker-registry -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
- --api-version=v1
-
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
deleted file mode 100644
index 85d7073f2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
+++ /dev/null
@@ -1,88 +0,0 @@
----
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-- name: Load openshift_facts
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_facts
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed
- hosts: oo_first_master
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
- gather_facts: no
- tasks:
- - fail:
- msg: >
- This upgrade is only supported for origin, openshift-enterprise, and online
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise', 'online']
-
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ target_version }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
- tasks:
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
-
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
-
- - name: Determine available versions
- script: ../files/rpm_versions.sh {{ g_new_service_name }}
- register: g_versions_result
-
- - set_fact:
- g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
-
- - set_fact:
- g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
-
- - fail:
- msg: This playbook requires Origin 1.1 or later
- when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
-
- - fail:
- msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
- when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
-
- - fail:
- msg: Upgrade packages not found
- when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
-
- - set_fact:
- pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
- hosts: localhost
- connection: local
- become: no
- vars:
- pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
- tasks:
- - set_fact:
- pre_upgrade_completed: "{{ hostvars
- | oo_select_keys(pre_upgrade_hosts)
- | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
- - set_fact:
- pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
- when: pre_upgrade_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
deleted file mode 100644
index e5cfa58aa..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+++ /dev/null
@@ -1,140 +0,0 @@
----
-###############################################################################
-# The restart playbook should be run after this playbook completes.
-###############################################################################
-
-###############################################################################
-# Upgrade Masters
-###############################################################################
-- name: Upgrade master packages and configuration
- hosts: oo_masters_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- tasks:
- - name: Upgrade master packages
- command: "{{ ansible_pkg_mgr}} update-to -y {{ openshift.common.service_type }}-master{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"
- when: not openshift.common.is_containerized | bool
-
- - name: Ensure python-yaml present for config upgrade
- action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
- when: not openshift.common.is_containerized | bool
-
-# Currently 3.1.1 does not have any new configuration settings
-#
-# - name: Upgrade master configuration
-# openshift_upgrade_config:
-# from_version: '3.0'
-# to_version: '3.1'
-# role: master
-# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
-
-- name: Set master update status to complete
- hosts: oo_masters_to_config
- tasks:
- - set_fact:
- master_update_complete: True
-
-##############################################################################
-# Gate on master update complete
-##############################################################################
-- name: Gate on master update
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- master_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
- - set_fact:
- master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
- when: master_update_failed | length > 0
-
-###############################################################################
-# Upgrade Nodes
-###############################################################################
-- name: Upgrade nodes
- hosts: oo_nodes_to_config
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- roles:
- - openshift_facts
- tasks:
- - name: Upgrade node packages
- command: "{{ ansible_pkg_mgr }} update-to -y {{ openshift.common.service_type }}-node{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"
- when: not openshift.common.is_containerized | bool
-
- - name: Restart node service
- service: name="{{ openshift.common.service_type }}-node" state=restarted
-
- - set_fact:
- node_update_complete: True
-
-##############################################################################
-# Gate on nodes update
-##############################################################################
-- name: Gate on nodes update
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- node_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_nodes_to_config)
- | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
- - set_fact:
- node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
- when: node_update_failed | length > 0
-
-###############################################################################
-# Reconcile Cluster Roles and Cluster Role Bindings
-###############################################################################
-- name: Reconcile Cluster Roles and Cluster Role Bindings
- hosts: oo_masters_to_config
- vars:
- origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
- ent_reconcile_bindings: true
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- tasks:
- - name: Reconcile Cluster Roles
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --additive-only=true --confirm
- run_once: true
-
- - name: Reconcile Cluster Role Bindings
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-role-bindings
- --exclude-groups=system:authenticated
- --exclude-groups=system:authenticated:oauth
- --exclude-groups=system:unauthenticated
- --exclude-users=system:anonymous
- --additive-only=true --confirm
- when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
- run_once: true
-
- - set_fact:
- reconcile_complete: True
-
-##############################################################################
-# Gate on reconcile
-##############################################################################
-- name: Gate on reconcile
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- reconcile_completed: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
- - set_fact:
- reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
- when: reconcile_failed | length > 0
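
The completion-gate pattern being removed above is worth noting: each host sets a *_complete fact, then a localhost play collects the hosts where that fact is true and fails if any are missing. A minimal sketch of the same pattern, assuming the oo_select_keys and oo_collect filters from this repo's filter_plugins are available:

---
- name: Do the work on every host
  hosts: all
  tasks:
    - set_fact:
        work_complete: True

- name: Gate on work completion
  hosts: localhost
  connection: local
  become: no
  tasks:
    - set_fact:
        work_completed: "{{ hostvars
                         | oo_select_keys(groups.all)
                         | oo_collect('inventory_hostname', {'work_complete': true}) }}"
    - set_fact:
        work_failed: "{{ groups.all | difference(work_completed) }}"
    - fail:
        msg: "Cannot continue. The following hosts did not finish: {{ work_failed | join(',') }}"
      when: work_failed | length > 0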
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_node_upgrade.yml
index 319758a06..60ea84f8e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_node_upgrade.yml
@@ -1,7 +1,7 @@
- include_vars: ../../../../../roles/openshift_node/vars/main.yml
- name: Update systemd units
- include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+ include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
- name: Verifying the correct version was configured
shell: grep {{ verify_upgrade_version }} {{ item }}
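
For reference, the inline key=value syntax on include statements (used throughout these playbooks) sets a variable that is visible inside the included tasks file; the change above swaps the derived g_new_version for the openshift_image_tag fact. A hypothetical sketch, with an illustrative file name:

# caller: pass the tag down to the included tasks
- include: node_systemd_demo.yml openshift_version={{ openshift_image_tag }}

# node_systemd_demo.yml (hypothetical) can then reference the variable directly:
# - debug: msg="Configuring units for {{ openshift_version }}"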
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
deleted file mode 100644
index c7b18f51b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: Check if Docker is installed
- command: rpm -q docker
- register: pkg_check
- failed_when: pkg_check.rc > 1
- changed_when: no
-
-- name: Upgrade Docker
- command: "{{ ansible_pkg_mgr}} update -y docker"
- when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
- register: docker_upgrade
-
-- name: Restart Docker
- command: systemctl restart docker
- when: docker_upgrade | changed
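
The deleted file used a common check-then-upgrade idiom: rpm -q exits 0 if the package is installed and 1 if not, so failed_when: pkg_check.rc > 1 tolerates both outcomes while still catching real rpm errors. A minimal sketch of the same idiom, with an illustrative package name:

- name: Check if foo is installed
  command: rpm -q foo
  register: pkg_check
  failed_when: pkg_check.rc > 1   # 0 = installed, 1 = not installed, >1 = rpm error
  changed_when: no

- name: Upgrade foo only when it is present
  command: "{{ ansible_pkg_mgr }} update -y foo"
  when: pkg_check.rc == 0
  register: foo_upgrade

- name: Restart the service only if the package actually changed
  command: systemctl restart foo
  when: foo_upgrade | changed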
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
deleted file mode 100644
index a911f12be..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-- name: Prepare for Node evacuation
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
- delegate_to: "{{ groups.oo_first_master.0 }}"
-
-- name: Evacuate Node for Kubelet upgrade
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
- delegate_to: "{{ groups.oo_first_master.0 }}"
-
-- include: rpm_upgrade.yml
- vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: not openshift.common.is_containerized | bool
-
-- include: containerized_upgrade.yml
- when: openshift.common.is_containerized | bool
-
-- name: Set node schedulability
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: openshift.node.schedulable | bool
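
Note the delegation pattern in the deleted file: the manage-node commands act on the node currently being upgraded, but execute on the first master (the host holding admin.kubeconfig) via delegate_to. A reduced sketch of that shape:

- name: Mark this node unschedulable, running the command from the first master
  command: >
    {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
  delegate_to: "{{ groups.oo_first_master.0 }}"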
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.dep.service b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.dep.service
new file mode 120000
index 000000000..b384a3f4d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.dep.service
@@ -0,0 +1 @@
+../../../../../roles/openshift_node/templates/openshift.docker.node.dep.service \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.service b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.service
new file mode 120000
index 000000000..a2f140144
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.service
@@ -0,0 +1 @@
+../../../../../roles/openshift_node/templates/openshift.docker.node.service \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.docker.service b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.docker.service
new file mode 120000
index 000000000..61946ff91
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.docker.service
@@ -0,0 +1 @@
+../../../../../roles/openshift_node/templates/openvswitch.docker.service \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.sysconfig.j2 b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.sysconfig.j2
new file mode 120000
index 000000000..3adc56e4e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.sysconfig.j2
@@ -0,0 +1 @@
+../../../../../roles/openshift_node/templates/openvswitch.sysconfig.j2 \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
index c16965a35..ccf9514f1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
@@ -6,8 +6,8 @@
hosts: oo_first_master
vars:
openshift_deployment_type: "{{ deployment_type }}"
- registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
- router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
roles:
- openshift_manageiq
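
The registry_url substitution above can be exercised in isolation: the chained replace filters expand the ${component} and ${version} placeholders. A self-contained sketch, with illustrative values for both variables:

- hosts: localhost
  connection: local
  vars:
    registry_url: "openshift3/ose-${component}:${version}"   # illustrative format string
    openshift_image_tag: "v3.2.0.20"                         # illustrative tag
  tasks:
    - debug:
        msg: "{{ registry_url | replace('${component}', 'docker-registry') | replace('${version}', openshift_image_tag) }}"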
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
index f163cca86..b49b3df7d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -2,10 +2,12 @@
###############################################################################
# Evaluate host groups and gather facts
###############################################################################
-- name: Load openshift_facts and update repos
+
+- include: ../../initialize_facts.yml
+
+- name: Update repos and initialize facts on all hosts
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
roles:
- - openshift_facts
- openshift_repos
- name: Set openshift_no_proxy_internal_hostnames
@@ -34,10 +36,10 @@
###############################################################################
# Pre-upgrade checks
###############################################################################
-- name: Verify upgrade can proceed
+- name: Verify upgrade can proceed on first master
hosts: oo_first_master
vars:
- target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
gather_facts: no
tasks:
@@ -53,6 +55,11 @@
https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
+ # Error out in situations where the user has older versions specified in their
+ # inventory in any of the openshift_release, openshift_image_tag, and
+ # openshift_pkg_version variables. These must be removed or updated to proceed
+ # with the upgrade.
+ # TODO: Should we block if you're *over* the next major release version as well?
- fail:
msg: >
openshift_pkg_version is {{ openshift_pkg_version }} which is not a
@@ -65,6 +72,28 @@
valid version for a {{ target_version }} upgrade
when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<')
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ target_version }} upgrade
+ when: openshift_release is defined and not openshift_release | version_compare(target_version, '=')
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ vars:
+ # Request openshift_release 3.2 and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "3.2"
+ openshift_protect_installed_version: False
+ # The docker role (a dependency) should be told not to touch the installed version
+ # of docker; we handle the docker upgrade separately. (The inventory may have a
+ # docker_version defined, but we don't want to act on it until later.)
+ docker_protect_installed_version: True
+
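
The release normalization added above (stripping a leading 'v' before comparing against the target) can be tested on its own. A minimal sketch, using an illustrative inventory value:

- hosts: localhost
  connection: local
  vars:
    openshift_release: "v3.2"   # illustrative inventory value
  tasks:
    - set_fact:
        openshift_release: "{{ openshift_release[1:] }}"
      when: openshift_release[0] == 'v'
    - fail:
        msg: "openshift_release {{ openshift_release }} is not a valid release for a 3.2 upgrade"
      when: not openshift_release | version_compare('3.2', '=')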
- name: Verify master processes
hosts: oo_masters_to_config
roles:
@@ -100,6 +129,7 @@
hosts: oo_nodes_to_config
roles:
- openshift_facts
+ - openshift_docker_facts
tasks:
- name: Ensure Node is running
service:
@@ -111,19 +141,17 @@
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config
vars:
- target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- upgrading: True
- handlers:
- - include: ../../../../../roles/openshift_master/handlers/main.yml
- - include: ../../../../../roles/openshift_node/handlers/main.yml
- roles:
- # We want the cli role to evaluate so that the containerized oc/oadm wrappers
- # are modified to use the correct image tag. However, this can trigger a
- # docker restart if new configuration is laid down which would immediately
- # pull the latest image and defeat the purpose of these tasks.
- - { role: openshift_cli }
pre_tasks:
+ - fail:
+ msg: Unable to verify upgrade targets, OpenShift does not appear to be installed (openshift.common.version is undefined)
+ when: openshift.common.version is not defined
+
+ - fail:
+ msg: "Detected openshift_version {{ openshift_version }} does not match the expected verify_upgrade_version {{ verify_upgrade_version }}"
+ when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+
- name: Clean package cache
command: "{{ ansible_pkg_mgr }} clean all"
when: not openshift.common.is_atomic | bool
@@ -132,58 +160,28 @@
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
when: not openshift.common.is_containerized | bool
- - name: Determine available versions
- script: ../files/rpm_versions.sh {{ g_new_service_name }}
- register: g_rpm_versions_result
- when: not openshift.common.is_containerized | bool
-
- - set_fact:
- g_aos_versions: "{{ g_rpm_versions_result.stdout | from_yaml }}"
- when: not openshift.common.is_containerized | bool
-
- - name: Determine available versions
- script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
- register: g_containerized_versions_result
- when: openshift.common.is_containerized | bool
-
- - set_fact:
- g_aos_versions: "{{ g_containerized_versions_result.stdout | from_yaml }}"
+ - name: Verify containers are available for upgrade
+ command: >
+ docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
when: openshift.common.is_containerized | bool
- - set_fact:
- g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
- when: openshift_pkg_version is not defined
-
- - set_fact:
- g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
- when: openshift_pkg_version is defined
-
- - set_fact:
- g_new_version: "{{ openshift_image_tag | replace('v','') }}"
- when: openshift_image_tag is defined
-
- - fail:
- msg: Verifying the correct version was found
- when: g_aos_versions.curr_version == ""
-
- - fail:
- msg: Verifying the correct version was found
- when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
-
- - include_vars: ../../../../../roles/openshift_master/vars/main.yml
- when: inventory_hostname in groups.oo_masters_to_config
+ - name: Check latest available OpenShift RPM version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+ failed_when: false
+ changed_when: false
+ register: avail_openshift_version
+ when: not openshift.common.is_containerized | bool
- - name: Update systemd units
- include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
- when: inventory_hostname in groups.oo_masters_to_config
+ - debug: var=avail_openshift_version
- - include_vars: ../../../../../roles/openshift_node/vars/main.yml
- when: inventory_hostname in groups.oo_nodes_to_config
+ - name: Verify OpenShift 3.2 RPMs are available for upgrade
+ fail:
+ msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but 3.2 or greater is required"
+ when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare('3.2', '<')
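
The availability gate above relies on repoquery printing only the version via --qf, with failed_when: false keeping the play alive on hosts where the query cannot run so the later when-clause can decide. A reduced sketch, assuming yum-utils repoquery and an illustrative package name (repoquery_cmd itself is defined elsewhere in this repo):

- name: Check latest available OpenShift RPM version
  command: repoquery --plugins --qf '%{version}' atomic-openshift
  failed_when: false        # missing repos must not abort the play here
  changed_when: false
  register: avail_openshift_version

- fail:
    msg: "Only {{ avail_openshift_version.stdout }} is available, but 3.2 or later is required"
  when: avail_openshift_version.stdout | default('0.0', True) | version_compare('3.2', '<')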
- - name: Update systemd units
- include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
- when: inventory_hostname in groups.oo_nodes_to_config
+ # TODO: Are these two grep checks necessary anymore?
# Note: the version number is hardcoded here in hopes of catching potential
# bugs in how g_aos_versions.curr_version is set
- name: Verifying the correct version is installed for upgrade
@@ -198,19 +196,15 @@
with_items:
- /etc/systemd/system/openvswitch.service
- /etc/systemd/system/{{ openshift.common.service_type }}*.service
- when: openshift.common.is_containerized | bool
-
- - fail:
- msg: This playbook requires Origin 1.1 or later
- when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+ when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
- fail:
- msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
- when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+ msg: This upgrade playbook must be run on Origin 1.1 or later
+ when: deployment_type == 'origin' and openshift.common.version | version_compare('1.1','<')
- fail:
- msg: Upgrade packages not found
- when: openshift_image_tag is not defined and (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+ msg: This upgrade playbook must be run on OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and openshift.common.version | version_compare('3.1','<')
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
@@ -234,8 +228,8 @@
when: openshift.common.is_atomic | bool
- fail:
- msg: This playbook requires access to Docker 1.9 or later
- when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<')
+ msg: This playbook requires access to Docker 1.10 or later
+ when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
# TODO: add check to upgrade ostree to get latest Docker
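
The Docker gate above uses default(..., true) so that an empty avail_version string falls back to the currently installed version before comparing. A self-contained illustration, with illustrative version values:

- hosts: localhost
  connection: local
  vars:
    g_docker_version: { avail_version: "", curr_version: "1.9.1" }   # illustrative
  tasks:
    - fail:
        msg: This playbook requires access to Docker 1.10 or later
      when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10', '<')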
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
index 5c96ad094..1d97d3802 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
@@ -1,5 +1,6 @@
+# We already verified that the latest available RPM is suitable, so simply update the package.
- name: Upgrade packages
- command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}-{{ g_new_version }}"
+ command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-{{ component }}"
- name: Ensure python-yaml present for config upgrade
action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
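
The action form on the last task lets the module name itself come from a variable, which is how one task covers both yum and dnf hosts. A sketch of the same pattern with an illustrative package:

# ansible_pkg_mgr resolves to 'yum' or 'dnf' per host, so a single
# task works across both package managers (package name illustrative):
- name: Ensure jq is present
  action: "{{ ansible_pkg_mgr }} name=jq state=present"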
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
index 964257af5..6c27b0d44 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -3,49 +3,6 @@
# The restart playbook should be run after this playbook completes.
###############################################################################
-- name: Upgrade docker
- hosts: oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- - include: docker_upgrade.yml
- when: not openshift.common.is_atomic | bool
- - name: Set post docker install facts
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: docker
- local_facts:
- openshift_image_tag: "v{{ g_new_version }}"
- openshift_version: "{{ g_new_version }}"
-
-- name: Upgrade docker
- hosts: oo_etcd_to_config
- roles:
- - openshift_facts
- tasks:
- # Upgrade docker when host is not atomic and host is not a non-containerized etcd node
- - include: docker_upgrade.yml
- when: not openshift.common.is_atomic | bool and not ('oo_etcd_to_config' in group_names and not openshift.common.is_containerized)
-
-# The cli image is used by openshift_docker_facts to determine the currently installed
-# version. We need to explicitly pull the latest image to handle cases where
-# the locally cached 'latest' tag is older than g_new_version.
-- name: Download cli image
- hosts: oo_masters_to_config:oo_nodes_to_config
- roles:
- - { role: openshift_docker_facts }
- vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- tasks:
- - name: Pull Images
- command: >
- docker pull {{ item }}:latest
- with_items:
- - "{{ openshift.common.cli_image }}"
- when: openshift.common.is_containerized | bool
-
###############################################################################
# Upgrade Masters
###############################################################################
@@ -62,7 +19,7 @@
- include_vars: ../../../../../roles/openshift_master/vars/main.yml
- name: Update systemd units
- include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+ include: ../../../../../roles/openshift_master/tasks/systemd_units.yml
# - name: Upgrade master configuration
# openshift_upgrade_config:
@@ -98,36 +55,54 @@
###############################################################################
# Upgrade Nodes
###############################################################################
-- name: Upgrade nodes
- hosts: oo_nodes_to_config
+
+# Here we handle all tasks that might require a node evacuation (upgrading docker and the node service).
+- name: Perform upgrades that may require node evacuation
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
serial: 1
+ any_errors_fatal: true
roles:
- openshift_facts
handlers:
- include: ../../../../../roles/openshift_node/handlers/main.yml
tasks:
- - include: node_upgrade.yml
+ # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+ # or docker actually needs an upgrade before proceeding.
+ - name: Mark unschedulable if host is a node
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_config
- - set_fact:
- node_update_complete: True
+ - name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_config
+
+ # Only check whether a docker upgrade is required if docker_upgrade is not
+ # already explicitly set to False.
+ - include: ../docker/upgrade_check.yml
+ when: docker_upgrade is not defined or docker_upgrade | bool
+
+ - include: ../docker/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool
+
+ - include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
+
+ - include: containerized_node_upgrade.yml
+ when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
+
+ - name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
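
The combined play added above depends on two play keywords working together: serial: 1 upgrades one host at a time, so cluster capacity is only ever reduced by a single node, and any_errors_fatal: true aborts the whole rollout on the first failure instead of marching on. A minimal illustration of just those two keywords:

- name: Rolling, fail-fast maintenance
  hosts: all
  serial: 1                 # act on one host at a time
  any_errors_fatal: true    # stop the entire rollout on the first failure
  tasks:
    - name: Placeholder upgrade step
      command: /bin/true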
-##############################################################################
-# Gate on nodes update
-##############################################################################
-- name: Gate on nodes update
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- node_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_nodes_to_config)
- | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
- - set_fact:
- node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
- when: node_update_failed | length > 0
###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
@@ -136,12 +111,11 @@
- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
hosts: oo_masters_to_config
roles:
- - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" }
+ - { role: openshift_cli }
vars:
- origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
ent_reconcile_bindings: true
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- upgrading: True
tasks:
    - name: Verifying the correct command-line tools are available
      shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary }}