Diffstat (limited to 'playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2')
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_node_upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml)  |   2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml  |  14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml  |  24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml  |   4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml  | 137
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml  |   3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml  | 112
7 files changed, 115 insertions, 181 deletions
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_node_upgrade.yml
index 319758a06..60ea84f8e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_node_upgrade.yml
@@ -1,7 +1,7 @@
- include_vars: ../../../../../roles/openshift_node/vars/main.yml
- name: Update systemd units
- include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+ include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
- name: Verifying the correct version was configured
shell: grep {{ verify_upgrade_version }} {{ item }}
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
deleted file mode 100644
index c7b18f51b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: Check if Docker is installed
- command: rpm -q docker
- register: pkg_check
- failed_when: pkg_check.rc > 1
- changed_when: no
-
-- name: Upgrade Docker
- command: "{{ ansible_pkg_mgr}} update -y docker"
- when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
- register: docker_upgrade
-
-- name: Restart Docker
- command: systemctl restart docker
- when: docker_upgrade | changed
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
deleted file mode 100644
index a911f12be..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-- name: Prepare for Node evacuation
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
- delegate_to: "{{ groups.oo_first_master.0 }}"
-
-- name: Evacuate Node for Kubelet upgrade
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
- delegate_to: "{{ groups.oo_first_master.0 }}"
-
-- include: rpm_upgrade.yml
- vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: not openshift.common.is_containerized | bool
-
-- include: containerized_upgrade.yml
- when: openshift.common.is_containerized | bool
-
-- name: Set node schedulability
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: openshift.node.schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
index c16965a35..ccf9514f1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
@@ -6,8 +6,8 @@
hosts: oo_first_master
vars:
openshift_deployment_type: "{{ deployment_type }}"
- registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
- router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
roles:
- openshift_manageiq
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
index f163cca86..11e77c3de 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -2,10 +2,12 @@
###############################################################################
# Evaluate host groups and gather facts
###############################################################################
-- name: Load openshift_facts and update repos
+
+- include: ../../initialize_facts.yml
+
+- name: Update repos
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
roles:
- - openshift_facts
- openshift_repos
- name: Set openshift_no_proxy_internal_hostnames
@@ -34,10 +36,10 @@
###############################################################################
# Pre-upgrade checks
###############################################################################
-- name: Verify upgrade can proceed
+- name: Verify upgrade can proceed on first master
hosts: oo_first_master
vars:
- target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
gather_facts: no
tasks:
@@ -53,6 +55,11 @@
https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
+ # Error out in situations where the user has older versions specified in their
+ # inventory in any of the openshift_release, openshift_image_tag, and
+ # openshift_pkg_version variables. These must be removed or updated to proceed
+ # with upgrade.
+ # TODO: Should we block if you're *over* the next major release version as well?
- fail:
msg: >
openshift_pkg_version is {{ openshift_pkg_version }} which is not a
@@ -65,6 +72,28 @@
valid version for a {{ target_version }} upgrade
when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<')
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ target_version }} upgrade
+ when: openshift_release is defined and not openshift_release | version_compare(target_version ,'=')
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ vars:
+ # Request openshift_release 3.2 and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "3.2"
+ openshift_protect_installed_version: False
+ # Docker role (a dependency) should be told not to do anything to installed version
+ # of docker, we handle this separately during upgrade. (the inventory may have a
+ # docker_version defined, we don't want to actually do it until later)
+ docker_protect_installed_version: True
+
- name: Verify master processes
hosts: oo_masters_to_config
roles:
@@ -100,6 +129,7 @@
hosts: oo_nodes_to_config
roles:
- openshift_facts
+ - openshift_docker_facts
tasks:
- name: Ensure Node is running
service:
@@ -111,19 +141,17 @@
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config
vars:
- target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- upgrading: True
- handlers:
- - include: ../../../../../roles/openshift_master/handlers/main.yml
- - include: ../../../../../roles/openshift_node/handlers/main.yml
- roles:
- # We want the cli role to evaluate so that the containerized oc/oadm wrappers
- # are modified to use the correct image tag. However, this can trigger a
- # docker restart if new configuration is laid down which would immediately
- # pull the latest image and defeat the purpose of these tasks.
- - { role: openshift_cli }
pre_tasks:
+ - fail:
+ msg: Verify OpenShift is already installed
+ when: openshift.common.version is not defined
+
+ - fail:
+ msg: Verify the correct version was found
+ when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+
- name: Clean package cache
command: "{{ ansible_pkg_mgr }} clean all"
when: not openshift.common.is_atomic | bool
@@ -132,58 +160,31 @@
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
when: not openshift.common.is_containerized | bool
- - name: Determine available versions
- script: ../files/rpm_versions.sh {{ g_new_service_name }}
- register: g_rpm_versions_result
- when: not openshift.common.is_containerized | bool
-
- - set_fact:
- g_aos_versions: "{{ g_rpm_versions_result.stdout | from_yaml }}"
- when: not openshift.common.is_containerized | bool
-
- - name: Determine available versions
- script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
- register: g_containerized_versions_result
- when: openshift.common.is_containerized | bool
-
- - set_fact:
- g_aos_versions: "{{ g_containerized_versions_result.stdout | from_yaml }}"
+ - name: Verify containers are available for upgrade
+ command: >
+ docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
when: openshift.common.is_containerized | bool
- set_fact:
- g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
- when: openshift_pkg_version is not defined
+ repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery' }}"
- - set_fact:
- g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
- when: openshift_pkg_version is defined
-
- - set_fact:
- g_new_version: "{{ openshift_image_tag | replace('v','') }}"
- when: openshift_image_tag is defined
-
- - fail:
- msg: Verifying the correct version was found
- when: g_aos_versions.curr_version == ""
-
- - fail:
- msg: Verifying the correct version was found
- when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
-
- - include_vars: ../../../../../roles/openshift_master/vars/main.yml
- when: inventory_hostname in groups.oo_masters_to_config
+ - name: Check latest available OpenShift RPM version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+ failed_when: false
+ changed_when: false
+ register: avail_openshift_version
+ when: not openshift.common.is_containerized | bool
- - name: Update systemd units
- include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
- when: inventory_hostname in groups.oo_masters_to_config
+ - debug: var=avail_openshift_version
- - include_vars: ../../../../../roles/openshift_node/vars/main.yml
- when: inventory_hostname in groups.oo_nodes_to_config
+ - name: Verify OpenShift 3.2 RPMs are available for upgrade
+ fail:
+ msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but 3.2 or greater is required"
+ when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare('3.2', '<')
- - name: Update systemd units
- include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
- when: inventory_hostname in groups.oo_nodes_to_config
+ # TODO: Are these two grep checks necessary anymore?
# Note: the version number is hardcoded here in hopes of catching potential
# bugs in how g_aos_versions.curr_version is set
- name: Verifying the correct version is installed for upgrade
@@ -198,19 +199,15 @@
with_items:
- /etc/systemd/system/openvswitch.service
- /etc/systemd/system/{{ openshift.common.service_type }}*.service
- when: openshift.common.is_containerized | bool
-
- - fail:
- msg: This playbook requires Origin 1.1 or later
- when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+ when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
- fail:
- msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
- when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+ msg: This upgrade playbook must be run on Origin 1.1 or later
+ when: deployment_type == 'origin' and openshift.common.version | version_compare('1.1','<')
- fail:
- msg: Upgrade packages not found
- when: openshift_image_tag is not defined and (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+ msg: This upgrade playbook must be run on OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and openshift.common.version | version_compare('3.1','<')
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
@@ -234,8 +231,8 @@
when: openshift.common.is_atomic | bool
- fail:
- msg: This playbook requires access to Docker 1.9 or later
- when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<')
+ msg: This playbook requires access to Docker 1.10 or later
+ when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
# TODO: add check to upgrade ostree to get latest Docker
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
index 5c96ad094..1d97d3802 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
@@ -1,5 +1,6 @@
+# We verified latest rpm available is suitable, so just yum update.
- name: Upgrade packages
- command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}-{{ g_new_version }}"
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}"
- name: Ensure python-yaml present for config upgrade
action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
index 964257af5..6c27b0d44 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -3,49 +3,6 @@
# The restart playbook should be run after this playbook completes.
###############################################################################
-- name: Upgrade docker
- hosts: oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- - include: docker_upgrade.yml
- when: not openshift.common.is_atomic | bool
- - name: Set post docker install facts
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: docker
- local_facts:
- openshift_image_tag: "v{{ g_new_version }}"
- openshift_version: "{{ g_new_version }}"
-
-- name: Upgrade docker
- hosts: oo_etcd_to_config
- roles:
- - openshift_facts
- tasks:
- # Upgrade docker when host is not atomic and host is not a non-containerized etcd node
- - include: docker_upgrade.yml
- when: not openshift.common.is_atomic | bool and not ('oo_etcd_to_config' in group_names and not openshift.common.is_containerized)
-
-# The cli image is used by openshift_docker_facts to determine the currently installed
-# version. We need to explicitly pull the latest image to handle cases where
-# the locally cached 'latest' tag is older the g_new_version.
-- name: Download cli image
- hosts: oo_masters_to_config:oo_nodes_to_config
- roles:
- - { role: openshift_docker_facts }
- vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- tasks:
- - name: Pull Images
- command: >
- docker pull {{ item }}:latest
- with_items:
- - "{{ openshift.common.cli_image }}"
- when: openshift.common.is_containerized | bool
-
###############################################################################
# Upgrade Masters
###############################################################################
@@ -62,7 +19,7 @@
- include_vars: ../../../../../roles/openshift_master/vars/main.yml
- name: Update systemd units
- include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+ include: ../../../../../roles/openshift_master/tasks/systemd_units.yml
# - name: Upgrade master configuration
# openshift_upgrade_config:
@@ -98,36 +55,54 @@
###############################################################################
# Upgrade Nodes
###############################################################################
-- name: Upgrade nodes
- hosts: oo_nodes_to_config
+
+# Here we handle all tasks that might require a node evac. (upgrading docker, and the node service)
+- name: Perform upgrades that may require node evacuation
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
serial: 1
+ any_errors_fatal: true
roles:
- openshift_facts
handlers:
- include: ../../../../../roles/openshift_node/handlers/main.yml
tasks:
- - include: node_upgrade.yml
+ # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+ # or docker actually needs an upgrade before proceeding.
+ - name: Mark unschedulable if host is a node
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_config
- - set_fact:
- node_update_complete: True
+ - name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_config
+
+ # Only check if docker upgrade is required if docker_upgrade is not
+ # already set to False.
+ - include: ../docker/upgrade_check.yml
+ when: docker_upgrade is not defined or docker_upgrade | bool
+
+ - include: ../docker/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool
+
+ - include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
+
+ - include: containerized_node_upgrade.yml
+ when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
+
+ - name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
-##############################################################################
-# Gate on nodes update
-##############################################################################
-- name: Gate on nodes update
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- node_update_completed: "{{ hostvars
- | oo_select_keys(groups.oo_nodes_to_config)
- | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
- - set_fact:
- node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
- when: node_update_failed | length > 0
###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
@@ -136,12 +111,11 @@
- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
hosts: oo_masters_to_config
roles:
- - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" }
+ - { role: openshift_cli }
vars:
- origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
ent_reconcile_bindings: true
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- upgrading: True
tasks:
- name: Verifying the correct commandline tools are available
shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}