Diffstat (limited to 'playbooks/common/openshift-cluster')
 playbooks/common/openshift-cluster/additional_config.yml                                      |  28
 playbooks/common/openshift-cluster/config.yml                                                 |  28
 playbooks/common/openshift-cluster/enable_dnsmasq.yml                                         |  66
 playbooks/common/openshift-cluster/evaluate_groups.yml                                        |  24
 playbooks/common/openshift-cluster/initialize_facts.yml                                       |  11
 playbooks/common/openshift-cluster/library (symlink)                                          |   1
 playbooks/common/openshift-cluster/openshift_hosted.yml                                       |  30
 playbooks/common/openshift-cluster/update_repos_and_packages.yml                              |   7
 playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh             |  22
 playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh                             |  12
 playbooks/common/openshift-cluster/upgrades/files/versions.sh                                 |   9
 playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml                            |   8
 playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml                          |  14
 playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml                               |   1
 playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml                                |   5
 playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml                            |   6
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 (symlink) |   1
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml            |  11
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker (symlink)                     |   1
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster (symlink)             |   1
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml                   |  14
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins (symlink)             |   1
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library (symlink)                    |   1
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins (symlink)             |   1
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster (symlink)             |   1
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml                     |  24
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml                             |  59
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml                              | 345
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles (symlink)                      |   1
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml                      |   9
 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml                          | 192
 playbooks/common/openshift-cluster/validate_hostnames.yml                                     |  10
 32 files changed, 874 insertions(+), 70 deletions(-)
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index 1ac78468a..a34322754 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -17,6 +17,7 @@
- role: openshift_master_cluster
when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- role: openshift_examples
+ registry_url: "{{ openshift.master.registry_url }}"
when: openshift.common.install_examples | bool
- role: openshift_cluster_metrics
when: openshift.common.use_cluster_metrics | bool
@@ -27,30 +28,5 @@
(osm_use_cockpit | bool or osm_use_cockpit is undefined )
- role: flannel_register
when: openshift.common.use_flannel | bool
- - role: pods
- when: openshift.common.deployment_type == 'online'
- - role: os_env_extras
- when: openshift.common.deployment_type == 'online'
-- name: Create persistent volumes and create hosted services
- hosts: oo_first_master
- vars:
- attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}"
- deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}"
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- roles:
- - role: openshift_persistent_volumes
- when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
- - role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- - registry
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - privileged
- - role: openshift_router
- when: deploy_infra | bool
- - role: openshift_registry
- registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
- when: deploy_infra | bool and attach_registry_volume | bool
+
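
The hosted-services play removed above is not dropped: it moves, largely
verbatim, into the new playbooks/common/openshift-cluster/openshift_hosted.yml
added later in this diff, with the openshift_router role superseded by the new
openshift_hosted role and an openshift_metrics role added. config.yml wires it
in at the end of the run:

    - include: openshift_hosted.yml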
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 23c8f039e..5fec11541 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,16 +1,42 @@
---
- include: evaluate_groups.yml
+- include: initialize_facts.yml
+
- include: validate_hostnames.yml
-- include: ../openshift-docker/config.yml
+- name: Set oo_options
+ hosts: oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
+ when: openshift_docker_additional_registries is not defined
+ - set_fact:
+ openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
+ when: openshift_docker_insecure_registries is not defined
+ - set_fact:
+ openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
+ when: openshift_docker_blocked_registries is not defined
+ - set_fact:
+ openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
+ when: openshift_docker_options is not defined
+ - set_fact:
+ openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
+ when: openshift_docker_log_driver is not defined
+ - set_fact:
+ openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
+ when: openshift_docker_log_options is not defined
- include: ../openshift-etcd/config.yml
- include: ../openshift-nfs/config.yml
+- include: ../openshift-loadbalancer/config.yml
+
- include: ../openshift-master/config.yml
- include: additional_config.yml
- include: ../openshift-node/config.yml
+
+- include: openshift_hosted.yml
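
Each set_fact above only fills in a value the inventory has not already
provided. For reference, the oo_option lookup (from this repo's
lookup_plugins) resolves its value by priority; a sketch for one of the
options, with the order hedged from the plugin's documentation:

    # Resolution order of lookup('oo_option', 'docker_log_driver'):
    #   1. the Ansible variable cli_docker_log_driver (set by bin/cluster --option)
    #   2. the environment variable docker_log_driver
    #   3. an empty string
    openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"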
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
new file mode 100644
index 000000000..f2bcc872f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -0,0 +1,66 @@
+---
+- include: evaluate_groups.yml
+
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ roles:
+ - openshift_facts
+ post_tasks:
+ - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1"
+ when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool
+
+- name: Reconfigure masters to listen on our new dns_port
+ hosts: oo_masters_to_config
+ handlers:
+ - include: ../../../roles/openshift_master/handlers/main.yml
+ vars:
+ os_firewall_allow:
+ - service: skydns tcp
+ port: "{{ openshift.master.dns_port }}/tcp"
+ - service: skydns udp
+ port: "{{ openshift.master.dns_port }}/udp"
+ roles:
+ - os_firewall
+ tasks:
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ use_dnsmasq: True
+ - role: master
+ local_facts:
+ dns_port: '8053'
+ - modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: dnsConfig.bindAddress
+ yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}"
+ notify: restart master
+ - meta: flush_handlers
+
+- name: Configure nodes for dnsmasq
+ hosts: oo_nodes_to_config
+ handlers:
+ - include: ../../../roles/openshift_node/handlers/main.yml
+ pre_tasks:
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ use_dnsmasq: True
+ - role: node
+ local_facts:
+ dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_node_dnsmasq
+ post_tasks:
+ - modify_yaml:
+ dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
+ yaml_key: dnsIP
+ yaml_value: "{{ openshift.node.dns_ip }}"
+ notify: restart node
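
The net effect of enable_dnsmasq.yml: SkyDNS on the masters moves off port 53
to 8053, freeing port 53 on each node for dnsmasq (configured by the
openshift_node_dnsmasq role), and each node is pointed at its own address for
DNS. Illustrative results of the two modify_yaml tasks, assuming a bind
address of 0.0.0.0 and a node whose default IPv4 address is 192.0.2.10:

    # master-config.yaml
    dnsConfig:
      bindAddress: 0.0.0.0:8053

    # node-config.yaml
    dnsIP: 192.0.2.10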
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 432a92b49..c5273b08f 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -29,12 +29,20 @@
msg: The nfs group must be limited to one host
when: (groups[g_nfs_hosts] | default([])) | length > 1
+ - name: Evaluate oo_all_hosts
+ add_host:
+ name: "{{ item }}"
+ groups: oo_all_hosts
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: g_all_hosts | default([])
+
- name: Evaluate oo_masters
add_host:
name: "{{ item }}"
groups: oo_masters
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
- name: Evaluate oo_etcd_to_config
@@ -42,7 +50,7 @@
name: "{{ item }}"
groups: oo_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_etcd_hosts | default([]) }}"
- name: Evaluate oo_masters_to_config
@@ -50,7 +58,7 @@
name: "{{ item }}"
groups: oo_masters_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
- name: Evaluate oo_nodes_to_config
@@ -58,7 +66,7 @@
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
# Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
@@ -67,7 +75,7 @@
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | default([]) }}"
when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined
@@ -83,7 +91,7 @@
name: "{{ g_master_hosts[0] }}"
groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
when: g_master_hosts|length > 0
- name: Evaluate oo_lb_to_config
@@ -91,7 +99,7 @@
name: "{{ item }}"
groups: oo_lb_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_lb_hosts | default([]) }}"
- name: Evaluate oo_nfs_to_config
@@ -99,5 +107,5 @@
name: "{{ item }}"
groups: oo_nfs_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_nfs_hosts | default([]) }}"
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
new file mode 100644
index 000000000..37f523246
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -0,0 +1,11 @@
+---
+- name: Initialize host facts
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(None) }}"
diff --git a/playbooks/common/openshift-cluster/library b/playbooks/common/openshift-cluster/library
new file mode 120000
index 000000000..d0b7393d3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/library
@@ -0,0 +1 @@
+../../../library/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
new file mode 100644
index 000000000..811b3d685
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -0,0 +1,30 @@
+- name: Create persistent volumes and create hosted services
+ hosts: oo_first_master
+ vars:
+ attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}"
+ deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}"
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
+ - role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ - registry
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - privileged
+ - role: openshift_registry
+ registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
+ when: deploy_infra | bool and attach_registry_volume | bool
+ - role: openshift_metrics
+ when: openshift.hosted.metrics.deploy | bool
+
+- name: Create Hosted Resources
+ hosts: oo_first_master
+ pre_tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ roles:
+ - role: openshift_hosted
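
Whether the registry receives a persistent volume hinges on
openshift.hosted.registry.storage.kind, which is normally fed from the
inventory. A hedged sketch of the kind of settings involved, using the
openshift_hosted_registry_storage_* variable convention (names and values
illustrative):

    openshift_hosted_registry_storage_kind: nfs
    openshift_hosted_registry_storage_volume_name: registry
    openshift_hosted_registry_storage_volume_size: 10Gi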
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index 1474bb3ca..e3d16d359 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -1,8 +1,15 @@
---
+- include: evaluate_groups.yml
+
- hosts: oo_hosts_to_update
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
+  # Explicitly calling openshift_facts because it appears that when
+  # rhel_subscribe is skipped, the openshift_facts dependency of
+  # openshift_repos is also skipped (observed with Ansible 2.0.2 at least)
+ - openshift_facts
- role: rhel_subscribe
when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
ansible_distribution == "RedHat" and
diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
new file mode 100644
index 000000000..9bbeff660
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Here we don't really care if this is a master, api, controller or node image.
+# We just need to know the version of one of them.
+unit_file=$(ls /etc/systemd/system/${1}*.service | grep -v node-dep | head -n1)
+
+if [ ${1} == "origin" ]; then
+ image_name="openshift/origin"
+elif grep aep $unit_file > /dev/null 2>&1; then
+ image_name="aep3/node"
+elif grep openshift3 $unit_file > /dev/null 2>&1; then
+ image_name="openshift3/node"
+fi
+
+installed=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+docker pull ${image_name} > /dev/null 2>&1
+available=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
new file mode 100644
index 000000000..7bf249742
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+if [ `which dnf 2> /dev/null` ]; then
+ installed=$(dnf repoquery --installed --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
+ available=$(dnf repoquery --available --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
+else
+ installed=$(repoquery --plugins --pkgnarrow=installed --qf '%{version}-%{release}' "${@}" 2> /dev/null)
+ available=$(repoquery --plugins --pkgnarrow=available --qf '%{version}-%{release}' "${@}" 2> /dev/null)
+fi
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
deleted file mode 100644
index 3a1a8ebb1..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/versions.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ')
-
-yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ')
-
-echo "---"
-echo "curr_version: ${yum_installed}"
-echo "avail_version: ${yum_available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
index 63c8ef756..e31e7f8a3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -36,16 +36,17 @@
- name: Ensure AOS 3.0.2 or Origin 1.0.6
hosts: oo_first_master
tasks:
- fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
+ - fail:
+ msg: "This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"
when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
- name: Update cluster policy
hosts: oo_first_master
tasks:
- - name: oadm policy reconcile-cluster-roles --confirm
+ - name: oadm policy reconcile-cluster-roles --additive-only=true --confirm
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
- name: Upgrade default router
hosts: oo_first_master
@@ -108,5 +109,6 @@
vars:
openshift_examples_import_command: "update"
openshift_deployment_type: "{{ deployment_type }}"
+ registry_url: "{{ openshift.master.registry_url }}"
roles:
- openshift_examples
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 0fb38f32e..c3c1240d8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -54,7 +54,7 @@
- script: ../files/pre-upgrade-check
-- name: Verify upgrade can proceed
+- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config
vars:
target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
@@ -66,7 +66,7 @@
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- name: Determine available versions
- script: ../files/versions.sh {{ g_new_service_name }} openshift
+ script: ../files/rpm_versions.sh {{ g_new_service_name }} openshift
register: g_versions_result
- set_fact:
@@ -212,13 +212,10 @@
- name: Update deployment type
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
roles:
- openshift_facts
- post_tasks:
- - openshift_facts:
- role: common
- local_facts:
- deployment_type: "{{ deployment_type }}"
- name: Update master facts
hosts: oo_masters_to_config
@@ -493,7 +490,7 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
@@ -572,6 +569,7 @@
# Update the existing templates
- role: openshift_examples
openshift_examples_import_command: replace
+ registry_url: "{{ openshift.master.registry_url }}"
pre_tasks:
- name: Collect all routers
command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
index 196393b2a..f030eed18 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
@@ -19,6 +19,7 @@
# Update the existing templates
- role: openshift_examples
openshift_examples_import_command: replace
+ registry_url: "{{ openshift.master.registry_url }}"
pre_tasks:
- name: Collect all routers
command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
index 12b9c84d3..85d7073f2 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
@@ -29,19 +29,20 @@
valid version for a {{ target_version }} upgrade
when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
-- name: Verify upgrade can proceed
+- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config
vars:
target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
tasks:
- name: Clean package cache
command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
- set_fact:
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- name: Determine available versions
- script: ../files/versions.sh {{ g_new_service_name }}
+ script: ../files/rpm_versions.sh {{ g_new_service_name }}
register: g_versions_result
- set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
index 54bb251f7..e5cfa58aa 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -12,7 +12,7 @@
openshift_version: "{{ openshift_pkg_version | default('') }}"
tasks:
- name: Upgrade master packages
- command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
+ command: "{{ ansible_pkg_mgr}} update-to -y {{ openshift.common.service_type }}-master{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"
when: not openshift.common.is_containerized | bool
- name: Ensure python-yaml present for config upgrade
@@ -63,7 +63,7 @@
- openshift_facts
tasks:
- name: Upgrade node packages
- command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
+ command: "{{ ansible_pkg_mgr }} update-to -y {{ openshift.common.service_type }}-node{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"
when: not openshift.common.is_containerized | bool
- name: Restart node service
@@ -103,7 +103,7 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2
new file mode 120000
index 000000000..cf20e8959
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2
@@ -0,0 +1 @@
+../../../../../roles/openshift_master/templates/atomic-openshift-master.j2 \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
new file mode 100644
index 000000000..319758a06
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
@@ -0,0 +1,11 @@
+- include_vars: ../../../../../roles/openshift_node/vars/main.yml
+
+- name: Update systemd units
+ include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+
+- name: Verifying the correct version was configured
+ shell: grep {{ verify_upgrade_version }} {{ item }}
+ with_items:
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/{{ openshift.common.service_type }}*
+ when: verify_upgrade_version is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker
new file mode 120000
index 000000000..5a3dd12b3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker
@@ -0,0 +1 @@
+../../../../../roles/openshift_master/templates/docker \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster
new file mode 120000
index 000000000..3ee319365
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster
@@ -0,0 +1 @@
+../../../../../roles/openshift_master/templates/docker-cluster \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
new file mode 100644
index 000000000..c7b18f51b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
@@ -0,0 +1,14 @@
+- name: Check if Docker is installed
+ command: rpm -q docker
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Upgrade Docker
+ command: "{{ ansible_pkg_mgr}} update -y docker"
+ when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
+ register: docker_upgrade
+
+- name: Restart Docker
+ command: systemctl restart docker
+ when: docker_upgrade | changed
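
docker_upgrade.yml assumes a g_docker_version fact established by the
"Verify docker upgrade targets" play in pre.yml, which parses the same
curr_version/avail_version YAML produced by rpm_versions.sh. Its expected
shape (values illustrative):

    g_docker_version:
      curr_version: 1.8.2-10.el7
      avail_version: 1.9.1-25.el7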
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster
new file mode 120000
index 000000000..f44f8eb4f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster
@@ -0,0 +1 @@
+../../../../../roles/openshift_master/templates/native-cluster \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
new file mode 100644
index 000000000..a911f12be
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
@@ -0,0 +1,24 @@
+- name: Prepare for Node evacuation
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: not openshift.common.is_containerized | bool
+
+- include: containerized_upgrade.yml
+ when: openshift.common.is_containerized | bool
+
+- name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: openshift.node.schedulable | bool
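
These tasks assume one node is drained at a time; the consuming play in
upgrade.yml (later in this diff) runs them with serial: 1, so each node is
evacuated, upgraded, and made schedulable again before the next begins:

    - name: Upgrade nodes
      hosts: oo_nodes_to_config
      serial: 1
      tasks:
        - include: node_upgrade.yml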
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
new file mode 100644
index 000000000..c16965a35
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
@@ -0,0 +1,59 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ - openshift_manageiq
+  # Create the new templates shipped in 3.2; existing templates are left
+ # unmodified. This prevents the subsequent role definition for
+ # openshift_examples from failing when trying to replace templates that do
+ # not already exist. We could have potentially done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ registry_url: "{{ openshift.master.registry_url }}"
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Collect all routers
+ command: >
+ {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+ register: all_routers
+ failed_when: false
+ changed_when: false
+
+ - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ when: all_routers.rc == 0
+
+ - set_fact: haproxy_routers=[]
+ when: all_routers.rc != 0
+
+ - name: Update router image to current version
+ when: all_routers.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+ --api-version=v1
+ with_items: haproxy_routers
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -n default -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
+
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
new file mode 100644
index 000000000..f163cca86
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -0,0 +1,345 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts and update repos
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ This upgrade does not support Pacemaker:
+ https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
+ when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<')
+
+- name: Verify master processes
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-api"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+- name: Verify node processes
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Ensure Node is running
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: started
+ enabled: yes
+ when: openshift.common.is_containerized | bool
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ upgrading: True
+ handlers:
+ - include: ../../../../../roles/openshift_master/handlers/main.yml
+ - include: ../../../../../roles/openshift_node/handlers/main.yml
+ roles:
+ # We want the cli role to evaluate so that the containerized oc/oadm wrappers
+ # are modified to use the correct image tag. However, this can trigger a
+ # docker restart if new configuration is laid down which would immediately
+ # pull the latest image and defeat the purpose of these tasks.
+ - { role: openshift_cli }
+ pre_tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Determine available versions
+ script: ../files/rpm_versions.sh {{ g_new_service_name }}
+ register: g_rpm_versions_result
+ when: not openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_aos_versions: "{{ g_rpm_versions_result.stdout | from_yaml }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Determine available versions
+ script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
+ register: g_containerized_versions_result
+ when: openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_aos_versions: "{{ g_containerized_versions_result.stdout | from_yaml }}"
+ when: openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+ when: openshift_pkg_version is not defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
+ when: openshift_pkg_version is defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_image_tag | replace('v','') }}"
+ when: openshift_image_tag is defined
+
+ - fail:
+      msg: Could not determine the currently installed version
+ when: g_aos_versions.curr_version == ""
+
+ - fail:
+      msg: "Detected version {{ g_new_version }} does not match verify_upgrade_version {{ verify_upgrade_version }}"
+ when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
+
+ - include_vars: ../../../../../roles/openshift_master/vars/main.yml
+ when: inventory_hostname in groups.oo_masters_to_config
+
+ - name: Update systemd units
+ include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+ when: inventory_hostname in groups.oo_masters_to_config
+
+ - include_vars: ../../../../../roles/openshift_node/vars/main.yml
+ when: inventory_hostname in groups.oo_nodes_to_config
+
+ - name: Update systemd units
+ include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+ when: inventory_hostname in groups.oo_nodes_to_config
+
+ # Note: the version number is hardcoded here in hopes of catching potential
+ # bugs in how g_aos_versions.curr_version is set
+ - name: Verifying the correct version is installed for upgrade
+ shell: grep 3.1.1.6 {{ item }}
+ with_items:
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/{{ openshift.common.service_type }}*
+ when: verify_upgrade_version is defined
+
+ - name: Verifying the image version is used in the systemd unit
+ shell: grep IMAGE_VERSION {{ item }}
+ with_items:
+ - /etc/systemd/system/openvswitch.service
+ - /etc/systemd/system/{{ openshift.common.service_type }}*.service
+ when: openshift.common.is_containerized | bool
+
+ - fail:
+ msg: This playbook requires Origin 1.1 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+ - fail:
+ msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+ - fail:
+ msg: Upgrade packages not found
+ when: openshift_image_tag is not defined and (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ tasks:
+ - name: Determine available Docker
+ script: ../files/rpm_versions.sh docker
+ register: g_docker_version_result
+ when: not openshift.common.is_atomic | bool
+
+ - name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+ - fail:
+ msg: This playbook requires access to Docker 1.9 or later
+ when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<')
+
+ # TODO: add check to upgrade ostree to get latest Docker
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
+
+###############################################################################
+# Backup etcd
+###############################################################################
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+ # We assume to be using the data dir for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+        {{ etcd_disk_usage.stdout }} KiB disk space required for etcd backup,
+        {{ avail_disk.stdout }} KiB available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+ - set_fact:
+ etcd_backup_complete: True
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
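
pre.yml also establishes the gating pattern reused after each phase of
upgrade.yml (master update, node update, reconcile): every host records a
completion flag with set_fact, then a localhost play collects that flag
across hostvars and aborts the run if any host is missing it. Stripped to
its core, with generic group and flag names:

    - set_fact:
        step_completed: "{{ hostvars
                            | oo_select_keys(groups.some_group)
                            | oo_collect('inventory_hostname', {'step_complete': true}) }}"
    - fail:
        msg: "Hosts that did not finish: {{ groups.some_group | difference(step_completed) | join(',') }}"
      when: groups.some_group | difference(step_completed) | length > 0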
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
new file mode 100644
index 000000000..5c96ad094
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
@@ -0,0 +1,9 @@
+- name: Upgrade packages
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}-{{ g_new_version }}"
+
+- name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_atomic | bool
+
+- name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
new file mode 100644
index 000000000..964257af5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -0,0 +1,192 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+- name: Upgrade docker
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - include: docker_upgrade.yml
+ when: not openshift.common.is_atomic | bool
+ - name: Set post docker install facts
+ openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: docker
+ local_facts:
+ openshift_image_tag: "v{{ g_new_version }}"
+ openshift_version: "{{ g_new_version }}"
+
+- name: Upgrade docker
+ hosts: oo_etcd_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ # Upgrade docker when host is not atomic and host is not a non-containerized etcd node
+ - include: docker_upgrade.yml
+ when: not openshift.common.is_atomic | bool and not ('oo_etcd_to_config' in group_names and not openshift.common.is_containerized)
+
+# The cli image is used by openshift_docker_facts to determine the currently installed
+# version. We need to explicitly pull the latest image to handle cases where
+# the locally cached 'latest' tag is older than g_new_version.
+- name: Download cli image
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ roles:
+ - { role: openshift_docker_facts }
+ vars:
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ tasks:
+ - name: Pull Images
+ command: >
+ docker pull {{ item }}:latest
+ with_items:
+ - "{{ openshift.common.cli_image }}"
+ when: openshift.common.is_containerized | bool
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master
+ hosts: oo_masters_to_config
+ handlers:
+ - include: ../../../../../roles/openshift_master/handlers/main.yml
+ roles:
+ - openshift_facts
+ tasks:
+ - include: rpm_upgrade.yml component=master
+ when: not openshift.common.is_containerized | bool
+
+ - include_vars: ../../../../../roles/openshift_master/vars/main.yml
+
+ - name: Update systemd units
+ include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+
+# - name: Upgrade master configuration
+# openshift_upgrade_config:
+# from_version: '3.1'
+# to_version: '3.2'
+# role: master
+# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ serial: 1
+ roles:
+ - openshift_facts
+ handlers:
+ - include: ../../../../../roles/openshift_node/handlers/main.yml
+ tasks:
+ - include: node_upgrade.yml
+
+ - set_fact:
+ node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
+###############################################################################
+
+- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
+ hosts: oo_masters_to_config
+ roles:
+ - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" }
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ upgrading: True
+ tasks:
+ - name: Verifying the correct commandline tools are available
+ shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
+ when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
+
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --additive-only=true --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:authenticated:oauth
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - name: Reconcile Security Context Constraints
+ command: >
+ {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
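
As the header of upgrade.yml notes, services keep running the old binaries
until the separate restart playbook is run. A minimal sketch of the kind of
rolling restart that follow-up performs, with the play structure assumed and
the service name taken from this diff:

    - name: Rolling master restart (sketch)
      hosts: oo_masters_to_config
      serial: 1
      tasks:
        - service:
            name: "{{ openshift.common.service_type }}-master"
            state: restarted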
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index fd82997b9..50e25984f 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -1,19 +1,9 @@
---
-- include: evaluate_groups.yml
-
- name: Gather and set facts for node hosts
hosts: oo_nodes_to_config
roles:
- openshift_facts
tasks:
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- shell:
getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
register: lookupip