Diffstat (limited to 'playbooks/common/openshift-cluster')
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml  56
-rw-r--r--  playbooks/common/openshift-cluster/config.yml  72
-rw-r--r--  playbooks/common/openshift-cluster/create_services.yml  8
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml  103
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml)  0
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml)  0
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml)  0
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh  51
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh  22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check  193
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh  8
l---------  playbooks/common/openshift-cluster/upgrades/filter_plugins  1
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py  158
l---------  playbooks/common/openshift-cluster/upgrades/lookup_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/roles  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/library  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml  112
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml  648
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/library  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml  57
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml  88
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml  140
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml  9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml  14
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml  57
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml  252
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml  6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml  159
-rw-r--r--  playbooks/common/openshift-cluster/validate_hostnames.yml  26
44 files changed, 2186 insertions, 74 deletions
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
new file mode 100644
index 000000000..1ac78468a
--- /dev/null
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -0,0 +1,56 @@
+- name: Configure flannel
+ hosts: oo_first_master
+ vars:
+ etcd_urls: "{{ openshift.master.etcd_urls }}"
+ roles:
+ - role: flannel_register
+ when: openshift.common.use_flannel | bool
+
+- name: Additional master configuration
+ hosts: oo_first_master
+ vars:
+ cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+ etcd_urls: "{{ openshift.master.etcd_urls }}"
+ openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
+ omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
+ roles:
+ - role: openshift_master_cluster
+ when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
+ - role: openshift_examples
+ when: openshift.common.install_examples | bool
+ - role: openshift_cluster_metrics
+ when: openshift.common.use_cluster_metrics | bool
+ - role: openshift_manageiq
+ when: openshift.common.use_manageiq | bool
+ - role: cockpit
+ when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+ (osm_use_cockpit | bool or osm_use_cockpit is undefined )
+ - role: flannel_register
+ when: openshift.common.use_flannel | bool
+ - role: pods
+ when: openshift.common.deployment_type == 'online'
+ - role: os_env_extras
+ when: openshift.common.deployment_type == 'online'
+
+- name: Create persistent volumes and create hosted services
+ hosts: oo_first_master
+ vars:
+ attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}"
+ deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}"
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
+ - role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ - registry
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - privileged
+ - role: openshift_router
+ when: deploy_infra | bool
+ - role: openshift_registry
+ registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
+ when: deploy_infra | bool and attach_registry_volume | bool
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 4c74f96db..23c8f039e 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,74 +1,16 @@
---
-- name: Populate config host groups
- hosts: localhost
- gather_facts: no
- tasks:
- - fail:
- msg: This playbook rquires g_etcd_group to be set
- when: g_etcd_group is not defined
+- include: evaluate_groups.yml
- - fail:
- msg: This playbook rquires g_masters_group to be set
- when: g_masters_group is not defined
+- include: validate_hostnames.yml
- - fail:
- msg: This playbook rquires g_nodes_group to be set
- when: g_nodes_group is not defined
-
- - name: Evaluate oo_etcd_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_etcd_group] | default([])
-
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_nodes_group] | default([])
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
- when: g_nodeonmaster is defined and g_nodeonmaster == true
-
- - name: Evaluate oo_first_etcd
- add_host:
- name: "{{ groups[g_etcd_group][0] }}"
- groups: oo_first_etcd
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
-
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups[g_masters_group][0] }}"
- groups: oo_first_master
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+- include: ../openshift-docker/config.yml
- include: ../openshift-etcd/config.yml
+- include: ../openshift-nfs/config.yml
+
- include: ../openshift-master/config.yml
+- include: additional_config.yml
+
- include: ../openshift-node/config.yml
- vars:
- osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
diff --git a/playbooks/common/openshift-cluster/create_services.yml b/playbooks/common/openshift-cluster/create_services.yml
deleted file mode 100644
index e70709d19..000000000
--- a/playbooks/common/openshift-cluster/create_services.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Deploy OpenShift Services
- hosts: "{{ g_svc_master }}"
- connection: ssh
- gather_facts: yes
- roles:
- - openshift_registry
- - openshift_router
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
new file mode 100644
index 000000000..432a92b49
--- /dev/null
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -0,0 +1,103 @@
+---
+- name: Populate config host groups
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: This playbook requires g_etcd_hosts to be set
+ when: g_etcd_hosts is not defined
+
+ - fail:
+ msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
+ when: g_master_hosts is not defined and g_new_master_hosts is not defined
+
+ - fail:
+ msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
+ when: g_node_hosts is not defined and g_new_node_hosts is not defined
+
+ - fail:
+ msg: This playbook requires g_lb_hosts to be set
+ when: g_lb_hosts is not defined
+
+ - fail:
+ msg: This playbook requires g_nfs_hosts to be set
+ when: g_nfs_hosts is not defined
+
+ - fail:
+ msg: The nfs group must be limited to one host
+ when: g_nfs_hosts | default([]) | length > 1
+
+ - name: Evaluate oo_masters
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
+
+ - name: Evaluate oo_etcd_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_etcd_hosts | default([]) }}"
+
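+ # Prefer g_new_master_hosts (for scale-up runs), falling back to
+ # g_master_hosts; the boolean second argument to default() makes an
+ # empty value count as unset.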
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
+
+ # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is defined
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_master_hosts | default([]) }}"
+ when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined
+
+ - name: Evaluate oo_first_etcd
+ add_host:
+ name: "{{ g_etcd_hosts[0] }}"
+ groups: oo_first_etcd
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ when: g_etcd_hosts|length > 0
+
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ g_master_hosts[0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ when: g_master_hosts|length > 0
+
+ - name: Evaluate oo_lb_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_lb_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_lb_hosts | default([]) }}"
+
+ - name: Evaluate oo_nfs_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nfs_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_nfs_hosts | default([]) }}"
diff --git a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..1a6580795 100644
--- a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..36d7b7870 100644
--- a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..278942f8b 100644
--- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index 190e2d862..1474bb3ca 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -4,7 +4,7 @@
openshift_deployment_type: "{{ deployment_type }}"
roles:
- role: rhel_subscribe
- when: deployment_type == "enterprise" and
+ when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
ansible_distribution == "RedHat" and
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
diff --git a/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh b/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh
new file mode 100644
index 000000000..239f43314
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+set -e
+
+SERVICE_TYPE=$1
+DEPLOYMENT_TYPE=$2
+VERSION="v${3}"
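+# Example invocation (values illustrative):
+#   ensure_system_units_have_version.sh atomic-openshift openshift-enterprise 3.1.1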
+
+add_image_version_to_sysconfig () {
+ unit_name=$2
+ sysconfig_file=/etc/sysconfig/${unit_name}
+
+ if ! grep IMAGE_VERSION ${sysconfig_file}; then
+ sed -i "/CONFIG_FILE/a IMAGE_VERSION=${1}" ${sysconfig_file}
+ else
+ sed -i "s/\(IMAGE_VERSION=\).*/\1${1}/" ${sysconfig_file}
+ fi
+}
+
+add_image_version_to_unit () {
+ deployment_type=$1
+ unit_file=$2
+
+ if ! grep IMAGE_VERSION $unit_file; then
+ image_namespace="openshift/"
+ if [ $deployment_type == "atomic-enterprise" ]; then
+ image_namespace="aep3/"
+ elif [ $deployment_type == "openshift-enterprise" ]; then
+ image_namespace="openshift3/"
+ fi
+
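+ # e.g. rewrites "openshift3/ose" into "openshift3/ose:${IMAGE_VERSION}"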
+ sed -i "s|\(${image_namespace}[a-zA-Z0-9]\+\)|\1:\${IMAGE_VERSION}|" $unit_file
+ fi
+}
+
+for unit_file in $(ls /etc/systemd/system/${SERVICE_TYPE}*.service); do
+ unit_name=$(basename -s .service ${unit_file})
+ add_image_version_to_sysconfig $VERSION $unit_name
+ add_image_version_to_unit $DEPLOYMENT_TYPE $unit_file
+done
+
+if [ -e /etc/sysconfig/openvswitch ]; then
+ add_image_version_to_sysconfig $VERSION openvswitch
+else
+ echo IMAGE_VERSION=${VERSION} > /etc/sysconfig/openvswitch
+fi
+if ! grep EnvironmentFile /etc/systemd/system/openvswitch.service > /dev/null; then
+ sed -i "/Service/a EnvironmentFile=/etc/sysconfig/openvswitch" /etc/systemd/system/openvswitch.service
+fi
+add_image_version_to_unit $DEPLOYMENT_TYPE /etc/systemd/system/openvswitch.service
+
+systemctl daemon-reload
diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
new file mode 100644
index 000000000..da6bcd23c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Here we don't really care if this is a master, api, controller or node image.
+# We just need to know the version of one of them.
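+# Usage: openshift_container_versions.sh <service type>
+# e.g. (illustrative): openshift_container_versions.sh atomic-openshift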
+unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1)
+installed_container_name=$(basename -s .service ${unit_file})
+installed=$(docker exec ${installed_container_name} openshift version | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+if [ ${1} == "origin" ]; then
+ image_name="openshift/origin"
+elif grep aep $unit_file > /dev/null 2>&1; then
+ image_name="aep3/aep"
+elif grep openshift3 $unit_file > /dev/null 2>&1; then
+ image_name="openshift3/ose"
+fi
+
+docker pull ${image_name} > /dev/null 2>&1
+available=$(docker run --rm ${image_name} version | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
new file mode 100644
index 000000000..e5c958ebb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+"""
+Pre-upgrade checks that must be run on a master before proceeding with upgrade.
+"""
+# This is a script not a python module:
+# pylint: disable=invalid-name
+
+# NOTE: This script should not require any python libs other than what is
+# in the standard library.
+
+__license__ = "ASL 2.0"
+
+import json
+import os
+import subprocess
+import re
+
+# The maximum length of container.ports.name
+ALLOWED_LENGTH = 15
+# The valid structure of container.ports.name
+ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$')
+AT_LEAST_ONE_LETTER = re.compile('[a-z]')
+# Look at OC_PATH for the full path. Defaults to 'oc'
+OC_PATH = os.getenv('OC_PATH', 'oc')
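+# For example, "web-http" and "p8080" are valid; "8080" (no letter),
+# "-web" (leading hyphen) and "a--b" (adjacent hyphens) are not.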
+
+
+def validate(value):
+ """
+ validate verifies that value matches required conventions
+
+ Rules of container.ports.name validation:
+
+ * must be less than 16 chars
+ * at least one letter
+ * only a-z0-9-
+ * hyphens can not be leading or trailing or next to each other
+
+ :Parameters:
+ - `value`: Value to validate
+ """
+ if len(value) > ALLOWED_LENGTH:
+ return False
+
+ if '--' in value:
+ return False
+
+ # We search since it can be anywhere
+ if not AT_LEAST_ONE_LETTER.search(value):
+ return False
+
+ # We match because it must start at the beginning
+ if not ALLOWED_CHARS.match(value):
+ return False
+ return True
+
+
+def list_items(kind):
+ """
+ list_items returns a list of items from the api
+
+ :Parameters:
+ - `kind`: Kind of item to access
+ """
+ response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind])
+ items = json.loads(response)
+ return items.get("items", [])
+
+
+def get(obj, *paths):
+ """
+ Walks obj down the given sequence of keys, returning [] as soon as a
+ key is missing
+
+ :Parameters:
+ - `obj`: A dictionary structure
+ - `paths`: The keys to traverse, in order
+ """
+ ret_obj = obj
+ for path in paths:
+ if ret_obj.get(path, None) is None:
+ return []
+ ret_obj = ret_obj[path]
+ return ret_obj
+
+
+# pylint: disable=too-many-arguments
+def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid):
+ """
+ Prints out results in human friendly way.
+
+ :Parameters:
+ - `namespace`: Namespace of the resource
+ - `kind`: Kind of the resource
+ - `item_name`: Name of the resource
+ - `container_name`: Name of the container. May be "" when kind=Service.
+ - `invalid_label`: The label of the invalid field, Port.name or targetPort
+ - `port_name`: Name of the port
+ - `valid`: True if the port is valid
+ """
+ if not valid:
+ if len(container_name) > 0:
+ print('%s/%s -n %s (Container="%s" %s="%s")' % (
+ kind, item_name, namespace, container_name, invalid_label, port_name))
+ else:
+ print('%s/%s -n %s (%s="%s")' % (
+ kind, item_name, namespace, invalid_label, port_name))
+
+
+def print_validation_header():
+ """
+ Prints the error header. Should run on the first error to avoid
+ overwhelming the user.
+ """
+ print """\
+At least one port name is invalid and must be corrected before upgrading.
+Please update or remove any resources with invalid port names.
+
+ Valid port names must:
+
+ * be less than 16 characters
+ * have at least one letter
+ * contain only a-z0-9-
+ * not start or end with -
+ * not contain dashes next to each other ('--')
+"""
+
+
+def main():
+ """
+ main is the main entry point to this script
+ """
+ try:
+ # the comma at the end suppresses the newline
+ print "Checking for oc ...",
+ subprocess.check_output([OC_PATH, 'whoami'])
+ print "found"
+ except:
+ print(
+ 'Unable to run "%s whoami"\n'
+ 'Please ensure OpenShift is running, and "oc" is on your system '
+ 'path.\n'
+ 'You can override the path with the OC_PATH environment variable.'
+ % OC_PATH)
+ raise SystemExit(1)
+
+ # Where the magic happens
+ first_error = True
+ for kind, path in [
+ ('deploymentconfigs', ("spec", "template", "spec", "containers")),
+ ('replicationcontrollers', ("spec", "template", "spec", "containers")),
+ ('pods', ("spec", "containers"))]:
+ for item in list_items(kind):
+ namespace = item["metadata"]["namespace"]
+ item_name = item["metadata"]["name"]
+ for container in get(item, *path):
+ container_name = container["name"]
+ for port in get(container, "ports"):
+ port_name = port.get("name", None)
+ if not port_name:
+ # Unnamed ports are OK
+ continue
+ valid = validate(port_name)
+ if not valid and first_error:
+ first_error = False
+ print_validation_header()
+ pretty_print_errors(
+ namespace, kind, item_name,
+ container_name, "Port.name", port_name, valid)
+
+ # Services follow a different flow
+ for item in list_items('services'):
+ namespace = item["metadata"]["namespace"]
+ item_name = item["metadata"]["name"]
+ for port in get(item, "spec", "ports"):
+ port_name = port.get("targetPort", None)
+ if isinstance(port_name, int) or port_name is None:
+ # Integer only or unnamed ports are OK
+ continue
+ valid = validate(port_name)
+ if not valid and first_error:
+ first_error = False
+ print_validation_header()
+ pretty_print_errors(
+ namespace, "services", item_name, "",
+ "targetPort", port_name, valid)
+
+ # If we had at least 1 error then exit with 1
+ if not first_error:
+ raise SystemExit(1)
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
new file mode 100644
index 000000000..a2a9579b5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
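+# Usage: rpm_versions.sh <package>...
+# e.g. (illustrative): rpm_versions.sh atomic-openshift openshift
+# Prints the installed and available versions as a small YAML document.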
+installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ')
+available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ')
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/filter_plugins b/playbooks/common/openshift-cluster/upgrades/filter_plugins
new file mode 120000
index 000000000..b1213dedb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/filter_plugins
@@ -0,0 +1 @@
+../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
new file mode 100755
index 000000000..9a065fd1c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+"""Ansible module for modifying OpenShift configs during an upgrade"""
+
+import os
+import yaml
+
+DOCUMENTATION = '''
+---
+module: openshift_upgrade_config
+short_description: OpenShift Upgrade Config
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
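+# A minimal sketch; the config_base value is illustrative and normally
+# comes from openshift.common.config_base:
+- openshift_upgrade_config:
+    config_base: /etc/origin
+    from_version: '3.0'
+    to_version: '3.1'
+    role: master
+    backup: true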
+'''
+
+def modify_api_levels(level_list, remove, ensure, msg_prepend='',
+ msg_append=''):
+ """ modify_api_levels """
+ changed = False
+ changes = []
+
+ if not isinstance(remove, list):
+ remove = []
+
+ if not isinstance(ensure, list):
+ ensure = []
+
+ if not isinstance(level_list, list):
+ new_list = []
+ changed = True
+ changes.append("%s created missing %s" % (msg_prepend, msg_append))
+ else:
+ new_list = level_list
+ for level in remove:
+ if level in new_list:
+ new_list.remove(level)
+ changed = True
+ changes.append("%s removed %s %s" % (msg_prepend, level, msg_append))
+
+ for level in ensure:
+ if level not in new_list:
+ new_list.append(level)
+ changed = True
+ changes.append("%s added %s %s" % (msg_prepend, level, msg_append))
+
+ return {'new_list': new_list, 'changed': changed, 'changes': changes}
+
+
+def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
+ """Main upgrade method for 3.0 to 3.1."""
+ changes = []
+
+ # Facts do not get transferred to the hosts where custom modules run,
+ # need to make some assumptions here.
+ master_config = os.path.join(config_base, 'master/master-config.yaml')
+
+ master_cfg_file = open(master_config, 'r')
+ config = yaml.safe_load(master_cfg_file.read())
+ master_cfg_file.close()
+
+
+ # Remove unsupported api versions and ensure supported api versions from
+ # master config
+ unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
+ supported_levels = ['v1']
+
+ result = modify_api_levels(config.get('apiLevels'), unsupported_levels,
+ supported_levels, 'master-config.yaml:', 'from apiLevels')
+ if result['changed']:
+ config['apiLevels'] = result['new_list']
+ changes.append(result['changes'])
+
+ if 'kubernetesMasterConfig' in config and 'apiLevels' in config['kubernetesMasterConfig']:
+ config['kubernetesMasterConfig'].pop('apiLevels')
+ changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')
+
+ # Add masterCA to serviceAccountConfig
+ if 'serviceAccountConfig' in config and 'masterCA' not in config['serviceAccountConfig']:
+ config['serviceAccountConfig']['masterCA'] = config['oauthConfig'].get('masterCA', 'ca.crt')
+
+ # Add proxyClientInfo to master-config
+ if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
+ config['kubernetesMasterConfig']['proxyClientInfo'] = {
+ 'certFile': 'master.proxy-client.crt',
+ 'keyFile': 'master.proxy-client.key'
+ }
+ changes.append("master-config.yaml: added proxyClientInfo")
+
+ if len(changes) > 0:
+ if backup:
+ # TODO: Check success:
+ ansible_module.backup_local(master_config)
+
+ # Write the modified config:
+ out_file = open(master_config, 'w')
+ out_file.write(yaml.safe_dump(config, default_flow_style=False))
+ out_file.close()
+
+ return changes
+
+
+def upgrade_master(ansible_module, config_base, from_version, to_version, backup):
+ """Upgrade entry point."""
+ if from_version == '3.0':
+ if to_version == '3.1':
+ return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup)
+
+
+def main():
+ """ main """
+ # disabling pylint errors for global-variable-undefined and invalid-name
+ # for 'global module' usage, since it is required to use ansible_facts
+ # pylint: disable=global-variable-undefined, invalid-name,
+ # redefined-outer-name
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ config_base=dict(required=True),
+ from_version=dict(required=True, choices=['3.0']),
+ to_version=dict(required=True, choices=['3.1']),
+ role=dict(required=True, choices=['master']),
+ backup=dict(required=False, default=True, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ from_version = module.params['from_version']
+ to_version = module.params['to_version']
+ role = module.params['role']
+ backup = module.params['backup']
+ config_base = module.params['config_base']
+
+ try:
+ changes = []
+ if role == 'master':
+ changes = upgrade_master(module, config_base, from_version,
+ to_version, backup)
+
+ changed = len(changes) > 0
+ return module.exit_json(changed=changed, changes=changes)
+
+ # ignore broad-except error to avoid stack trace to ansible user
+ # pylint: disable=broad-except
+ except Exception, e:
+ return module.fail_json(msg=str(e))
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/playbooks/common/openshift-cluster/upgrades/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
new file mode 120000
index 000000000..aff753026
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
@@ -0,0 +1 @@
+../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/roles b/playbooks/common/openshift-cluster/upgrades/roles
new file mode 120000
index 000000000..4bdbcbad3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/roles
@@ -0,0 +1 @@
+../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
@@ -0,0 +1 @@
+../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
new file mode 100644
index 000000000..63c8ef756
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -0,0 +1,112 @@
+---
+- name: Evaluate groups
+ include: ../../evaluate_groups.yml
+
+- name: Re-Run cluster configuration to apply latest configuration changes
+ include: ../../config.yml
+
+- name: Upgrade masters
+ hosts: oo_masters_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade master packages
+ action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest"
+ - name: Restart master services
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade node packages
+ action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest"
+ - name: Restart node services
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+- name: Determine new master version
+ hosts: oo_first_master
+ tasks:
+ - name: Determine new version
+ command: >
+ rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
+ register: _new_version
+
+- name: Ensure AOS 3.0.2 or Origin 1.0.6
+ hosts: oo_first_master
+ tasks:
+ - fail:
+ msg: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
+ when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
+
+- name: Update cluster policy
+ hosts: oo_first_master
+ tasks:
+ - name: oadm policy reconcile-cluster-roles --confirm
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+
+- name: Upgrade default router
+ hosts: oo_first_master
+ vars:
+ - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+ - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ tasks:
+ - name: Check for default router
+ command: >
+ {{ oc_cmd }} get -n default dc/router
+ register: _default_router
+ failed_when: false
+ changed_when: false
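+ # The router needs allowHostNetwork and allowHostPorts on the privileged
+ # SCC; the next two tasks check for them and patch the SCC when either is
+ # false.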
+ - name: Check for allowHostNetwork and allowHostPorts
+ when: _default_router.rc == 0
+ shell: >
+ {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
+ register: _scc
+ - name: Grant allowHostNetwork and allowHostPorts
+ when:
+ - _default_router.rc == 0
+ - "'false' in _scc.stdout"
+ command: >
+ {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
+ - name: Update deployment config to 1.0.4/3.0.1 spec
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+ - name: Switch to hostNetwork=true
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+ - name: Update router image to current version
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+
+- name: Upgrade default registry
+ hosts: oo_first_master
+ vars:
+ - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+ - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ tasks:
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+
+- name: Update image streams and templates
+ hosts: oo_first_master
+ vars:
+ openshift_examples_import_command: "update"
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_examples
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
@@ -0,0 +1 @@
+../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
new file mode 100644
index 000000000..6d7cefc8e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -0,0 +1,648 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Evaluate host groups
+ include: ../../evaluate_groups.yml
+
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
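+ # Back up etcd on the dedicated etcd hosts when any exist; otherwise the
+ # first master holds the (embedded) etcd data.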
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
+ gather_facts: no
+ tasks:
+ # Pacemaker is currently the only supported upgrade path for multiple masters
+ - fail:
+ msg: "openshift_master_cluster_method must be set to 'pacemaker'"
+ when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method != "pacemaker"))
+
+ - fail:
+ msg: >
+ This upgrade is only supported for origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
+
+ # If this script errors out ansible will show the default stdout/stderr
+ # which contains details for the user:
+ - script: ../files/pre-upgrade-check
+
+
+- name: Verify upgrade can proceed
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+
+ - name: Determine available versions
+ script: ../files/rpm_versions.sh {{ g_new_service_name }} openshift
+ register: g_versions_result
+
+ - set_fact:
+ g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
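+ # Pick the target version: the available package version when the
+ # repositories offer one, otherwise the currently installed version.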
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+ when: openshift_pkg_version is not defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
+ when: openshift_pkg_version is defined
+
+ - fail:
+ msg: This playbook requires Origin 1.0.6 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<')
+
+ - fail:
+ msg: Upgrade packages not found
+ when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
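+# Each master and node sets pre_upgrade_complete only after all of its
+# checks pass; collect that fact here and abort if any host is missing it.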
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
+
+
+
+###############################################################################
+# Backup etcd
+###############################################################################
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+ # We assume to be using the data dir for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ avail_disk.stdout }} Kb available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+ - set_fact:
+ etcd_backup_complete: True
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
+
+
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_master_mktemp
+ changed_when: False
+
+- name: Update deployment type
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ roles:
+ - openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: common
+ local_facts:
+ deployment_type: "{{ deployment_type }}"
+
+- name: Update master facts
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
+- name: Upgrade master packages and configuration
+ hosts: oo_masters_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Upgrade to latest available kernel
+ action: "{{ ansible_pkg_mgr}} name=kernel state=latest"
+
+ - name: Upgrade master packages
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
+ when: openshift_pkg_version is not defined
+
+ - name: Upgrade packages
+ command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}"
+ when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise'
+
+ - name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Upgrade master configuration
+ openshift_upgrade_config:
+ from_version: '3.0'
+ to_version: '3.1'
+ role: master
+ config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+ - set_fact:
+ openshift_master_certs_no_etcd:
+ - admin.crt
+ - master.kubelet-client.crt
+ - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
+ - master.server.crt
+ - openshift-master.crt
+ - openshift-registry.crt
+ - openshift-router.crt
+ - etcd.server.crt
+ openshift_master_certs_etcd:
+ - master.etcd-client.crt
+
+ - set_fact:
+ openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
+
+ - name: Check status of master certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/master/{{ item }}"
+ with_items: openshift_master_certs
+ register: g_master_cert_stat_result
+
+ - set_fact:
+ master_certs_missing: "{{ False in (g_master_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list ) }}"
+ master_cert_subdir: master-{{ openshift.common.hostname }}
+ master_cert_config_dir: "{{ openshift.common.config_base }}/master"
+
+
+- name: Generate missing master certificates
+ hosts: oo_first_master
+ vars:
+ master_hostnames: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
+ master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
+ masters_needing_certs: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
+ | oo_filter_list(filter_attr='master_certs_missing') }}"
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_master_certificates
+ post_tasks:
+ - name: Remove generated etcd client certs when using external etcd
+ file:
+ path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ state: absent
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+ with_nested:
+ - masters_needing_certs
+ - - master.etcd-client.crt
+ - master.etcd-client.key
+
+ - name: Create a tarball of the master certs
+ command: >
+ tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
+ -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
+ with_items: masters_needing_certs
+
+ - name: Retrieve the master cert tarball from the master
+ fetch:
+ src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: masters_needing_certs
+
+
+- name: Sync generated certs, update service config and restart master services
+ hosts: oo_masters_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ tasks:
+ - name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
+ dest: "{{ master_cert_config_dir }}"
+ when: inventory_hostname != groups.oo_first_master.0
+
+ - name: Restart master service
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+ when: not openshift_master_ha | bool
+
+ - name: Ensure the master service is enabled
+ service: name="{{ openshift.common.service_type}}-master" state=started enabled=yes
+ when: not openshift_master_ha | bool
+
+ - name: Check for configured cluster
+ stat:
+ path: /etc/corosync/corosync.conf
+ register: corosync_conf
+ when: openshift_master_ha | bool
+
+ - name: Destroy cluster
+ command: pcs cluster destroy --all
+ when: openshift_master_ha | bool and corosync_conf.stat.exists == true
+ run_once: true
+
+ - name: Start pcsd
+ service: name=pcsd enabled=yes state=started
+ when: openshift_master_ha | bool
+
+
+- name: Re-create cluster
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ') }}"
+ roles:
+ - role: openshift_master_cluster
+ when: openshift_master_ha | bool
+
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - file: name={{ g_master_mktemp.stdout }} state=absent
+ changed_when: False
+
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Upgrade node packages
+ command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
+ when: openshift_pkg_version is not defined
+
+ - name: Upgrade packages
+ command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}"
+ when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise'
+
+ - name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+ - name: Ensure node service enabled
+ service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
+
+ - name: Install Ceph storage plugin dependencies
+ action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
+
+ - name: Install GlusterFS storage plugin dependencies
+ action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present"
+
+ - name: Set sebooleans to allow gluster storage plugin access from containers
+ seboolean:
+ name: "{{ item }}"
+ state: yes
+ persistent: yes
+ when: ansible_selinux and ansible_selinux.status == "enabled"
+ with_items:
+ - virt_use_fusefs
+ - virt_sandbox_use_fusefs
+ register: sebool_result
+ failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg"
+
+ - set_fact:
+ node_update_complete: True
+
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+
+###############################################################################
+# Post upgrade - Reconcile Cluster Roles and Cluster Role Bindings
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings
+ hosts: oo_masters_to_config
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:authenticated:oauth
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - name: Restart master services
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+ when: not openshift_master_ha | bool
+
+ - name: Restart master cluster
+ command: pcs resource restart master
+ when: openshift_master_ha | bool
+ run_once: true
+
+ - name: Wait for the clustered master service to be available
+ wait_for:
+ host: "{{ openshift_master_cluster_vip }}"
+ port: 8443
+ state: started
+ timeout: 180
+ delay: 90
+ when: openshift_master_ha | bool
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
+
+
+
+
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ # Create the new templates shipped in 3.1, existing templates are left
+ # unmodified. This prevents the subsequent role definition for
+ # openshift_examples from failing when trying to replace templates that do
+ # not already exist. We could have potentially done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Collect all routers
+ command: >
+ {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+ register: all_routers
+ failed_when: false
+ changed_when: false
+
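+ # Reduce the pod list to the metadata of the haproxy router deployment
+ # configs; fall back to an empty list if the lookup above failed.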
+ - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ when: all_routers.rc == 0
+
+ - set_fact:
+ haproxy_routers: []
+ when: all_routers.rc != 0
+
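+ # The 3.1 router runs with hostNetwork=true (patched below), so the
+ # privileged SCC must first allow host networking and host ports.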
+ - name: Check for allowHostNetwork and allowHostPorts
+ when: all_routers.rc == 0
+ shell: >
+ {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
+ register: _scc
+
+ - name: Grant allowHostNetwork and allowHostPorts
+ when:
+ - all_routers.rc == 0
+ - "'false' in _scc.stdout"
+ command: >
+ {{ oc_cmd }} patch scc/privileged -p
+ '{"allowHostPorts":true,"allowHostNetwork":true}' --api-version=v1
+
+ - name: Update deployment config to 1.0.4/3.0.1 spec
+ when: all_routers.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+ --api-version=v1
+ with_items: "{{ haproxy_routers }}"
+
+ - name: Switch to hostNetwork=true
+ when: all_routers.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+ --api-version=v1
+ with_items: "{{ haproxy_routers }}"
+
+ - name: Update router image to current version
+ when: all_routers.rc == 0 and not openshift.common.version_gte_3_1_1_or_1_1_1
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+ --api-version=v1
+ with_items: "{{ haproxy_routers }}"
+
+ - name: Update router image to current version
+ when: all_routers.rc == 0 and openshift.common.version_gte_3_1_1_or_1_1_1
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+ --api-version=v1
+ with_items: "{{ haproxy_routers }}"
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
new file mode 100644
index 000000000..196393b2a
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
@@ -0,0 +1,57 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ # Create the new templates shipped in 3.1.z, existing templates are left
+ # unmodified. This prevents the subsequent role definition for
+ # openshift_examples from failing when trying to replace templates that do
+ # not already exist. We could have potentially done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Collect all routers
+ command: >
+ {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+ register: all_routers
+ failed_when: false
+ changed_when: false
+
+ - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ when: all_routers.rc == 0
+
+ - set_fact:
+ haproxy_routers: []
+ when: all_routers.rc != 0
+
+ - name: Update router image to current version
+ when: all_routers.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+ --api-version=v1
+ with_items: "{{ haproxy_routers }}"
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
+
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
new file mode 100644
index 000000000..66935e061
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
@@ -0,0 +1,88 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version, '<')
+
+- name: Verify upgrade can proceed
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type == 'origin' else 'atomic-openshift' }}"
+
+ - name: Determine available versions
+ script: ../files/rpm_versions.sh {{ g_new_service_name }}
+ register: g_versions_result
+
+ - set_fact:
+ g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
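+ # Keep only the version portion, stripping the RPM release suffix after the
+ # first '-'.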
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+
+ - fail:
+ msg: This playbook requires Origin 1.1 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+ - fail:
+ msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+ - fail:
+ msg: Upgrade packages not found
+ when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
new file mode 100644
index 000000000..54bb251f7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -0,0 +1,140 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master packages and configuration
+ hosts: oo_masters_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade master packages
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_containerized | bool
+
+# Currently 3.1.1 does not have any new configuration settings
+#
+# - name: Upgrade master configuration
+# openshift_upgrade_config:
+# from_version: '3.0'
+# to_version: '3.1'
+# role: master
+# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Upgrade node packages
+ command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+ - set_fact:
+ node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles and Cluster Role Bindings
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings
+ hosts: oo_masters_to_config
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:authenticated:oauth
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
new file mode 100644
index 000000000..696994688
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
@@ -0,0 +1,9 @@
+- name: Update system_units
+ script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_new_version }}
+
+- name: Verify the correct version was configured
+ command: grep {{ verify_upgrade_version }} {{ item }}
+ with_items:
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/{{ openshift.common.service_type }}*
+ when: verify_upgrade_version is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
new file mode 100644
index 000000000..d9177e8a0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
@@ -0,0 +1,14 @@
+- name: Check if Docker is installed
+ command: rpm -q docker
+ register: pkg_check
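+ # rpm -q exits 1 when the package is simply not installed; only rc > 1
+ # indicates a real failure.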
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Upgrade Docker
+ command: "{{ ansible_pkg_mgr}} update -y docker"
+ when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
+ register: docker_upgrade
+
+- name: Restart Docker
+ service: name=docker state=restarted
+ when: docker_upgrade | changed
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
new file mode 100644
index 000000000..3fd97ac14
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
@@ -0,0 +1,57 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ # Create the new templates shipped in 3.2, existing templates are left
+ # unmodified. This prevents the subsequent role definition for
+ # openshift_examples from failing when trying to replace templates that do
+ # not already exist. We could have potentially done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Collect all routers
+ command: >
+ {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+ register: all_routers
+ failed_when: false
+ changed_when: false
+
+ - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ when: all_routers.rc == 0
+
+ - set_fact:
+ haproxy_routers: []
+ when: all_routers.rc != 0
+
+ - name: Update router image to current version
+ when: all_routers.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+ --api-version=v1
+ with_items: "{{ haproxy_routers }}"
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
+
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
new file mode 100644
index 000000000..c9afca559
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -0,0 +1,252 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
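+ # With external etcd, back up every etcd host; with embedded etcd the data
+ # lives on the first master, so back that up instead.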
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: "{{ groups.oo_etcd_to_config if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0) else groups.oo_first_master }}"
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version, '<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version, '<')
+
+- name: Verify upgrade can proceed
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ roles:
+ - openshift_cli
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type == 'origin' else 'atomic-openshift' }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Determine available versions
+ script: ../files/rpm_versions.sh {{ g_new_service_name }}
+ register: g_rpm_versions_result
+ when: not openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_aos_versions: "{{ g_rpm_versions_result.stdout | from_yaml }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Determine available versions
+ script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
+ register: g_containerized_versions_result
+ when: openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_aos_versions: "{{ g_containerized_versions_result.stdout | from_yaml }}"
+ when: openshift.common.is_containerized | bool
+
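+ # Derive g_new_version: default to the newest available package, allowing
+ # openshift_pkg_version and, last, openshift_image_tag to override it.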
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+ when: openshift_pkg_version is not defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
+ when: openshift_pkg_version is defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_image_tag | replace('v','') }}"
+ when: openshift_image_tag is defined
+
+ - fail:
+ msg: Expected version {{ verify_upgrade_version }} but found {{ g_new_version }}
+ when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
+
+ - name: Update systemd units
+ script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_aos_versions.curr_version }}
+ when: openshift.common.is_containerized | bool
+
+ # Note: the version number is hardcoded here in hopes of catching potential
+ # bugs in how g_aos_versions.curr_version is set
+ - name: Verify the correct version is installed for upgrade
+ shell: grep 3.1.1.6 {{ item }}
+ with_items:
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/{{ openshift.common.service_type }}*
+ when: verify_upgrade_version is defined
+
+ - fail:
+ msg: This playbook requires Origin 1.1 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+ - fail:
+ msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+ # TODO: this may only make sense for RPM installs. We probably need another check for containerized installs.
+ - fail:
+ msg: Upgrade packages not found
+ when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+ - name: Determine available Docker
+ script: ../files/rpm_versions.sh docker
+ register: g_docker_version_result
+ when: not openshift.common.is_atomic | bool
+
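+ # Atomic hosts cannot query yum for newer versions; emit the same YAML shape
+ # rpm_versions.sh produces, with an empty avail_version.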
+ - name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+ - fail:
+ msg: This playbook requires access to Docker 1.9 or later
+ when: not openshift.common.is_atomic | bool
+ and (g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<'))
+
+ # TODO: add check to upgrade ostree to get latest Docker
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
+
+###############################################################################
+# Backup etcd
+###############################################################################
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
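+ # Origin renamed its data directory from /var/lib/openshift to
+ # /var/lib/origin; the symlink keeps older installs backing up from the
+ # expected path.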
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists and not var_lib_origin.stat.exists
+
+ # TODO: replace shell module with command and update later checks
+ # We assume to be using the data dir for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} KiB of disk space is required for the etcd backup,
+ but only {{ avail_disk.stdout }} KiB is available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+ - set_fact:
+ etcd_backup_complete: True
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
new file mode 100644
index 000000000..7a2718e1b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
@@ -0,0 +1,6 @@
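+# Included with e.g. component=master; on RPM installs this expands to
+# something like "yum update -y atomic-openshift-master-<g_new_version>".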
+- name: Upgrade packages
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}-{{ g_new_version }}"
+
+- name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_atomic | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
new file mode 100644
index 000000000..d84d9f674
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -0,0 +1,159 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+- name: Upgrade Docker
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - include: docker_upgrade.yml
+ when: not openshift.common.is_atomic | bool
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - include: rpm_upgrade.yml component=master
+ when: not openshift.common.is_containerized | bool
+
+ - include: containerized_upgrade.yml
+ when: openshift.common.is_containerized | bool
+
+# - name: Upgrade master configuration
+# openshift_upgrade_config:
+# from_version: '3.1'
+# to_version: '3.2'
+# role: master
+# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: not openshift.common.is_containerized | bool
+
+ - include: containerized_upgrade.yml
+ when: openshift.common.is_containerized | bool
+
+ # This will restart the node
+ - name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+ - set_fact:
+ node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
+ hosts: oo_masters_to_config
+ roles:
+ - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" }
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Verify the correct command-line tools are available
+ shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary }}
+ when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
+
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:authenticated:oauth
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - name: Reconcile Security Context Constraints
+ command: >
+ {{ openshift.common.admin_binary }} policy reconcile-sccs --confirm
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
new file mode 100644
index 000000000..fd82997b9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -0,0 +1,26 @@
+---
+- include: evaluate_groups.yml
+
+- name: Gather and set facts for node hosts
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
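+ # getent ahostsv4 lists the IPv4 addresses the hostname resolves to; keep
+ # only the first one for the comparison below.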
+ - name: Look up the first IPv4 address for the configured hostname
+ shell: >
+ getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
+ register: lookupip
+ changed_when: false
+ failed_when: false
+ - name: Warn user about bad openshift_hostname values
+ pause:
+ prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+ seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
+ when: lookupip.stdout not in ansible_all_ipv4_addresses