summaryrefslogtreecommitdiffstats
path: root/playbooks/byo
diff options
context:
space:
mode:
Diffstat (limited to 'playbooks/byo')
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml105
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh23
l---------playbooks/byo/openshift-cluster/upgrades/docker/roles1
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml29
-rw-r--r--playbooks/byo/rhel_subscribe.yml2
5 files changed, 159 insertions, 1 deletions
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
new file mode 100644
index 000000000..8b1b2fb1b
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -0,0 +1,105 @@
+
+- name: Check for appropriate Docker versions for 1.9.x to 1.10.x upgrade
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Determine available Docker version
+ script: ../../../../common/openshift-cluster/upgrades/files/rpm_versions.sh docker
+ register: g_docker_version_result
+ when: not openshift.common.is_atomic | bool
+
+ - name: Check if Docker is installed
+ command: rpm -q docker
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Set fact if docker requires an upgrade
+ set_fact:
+ docker_upgrade: true
+ when: not openshift.common.is_atomic | bool and pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.10','<')
+
+ - fail:
+ msg: This playbook requires access to Docker 1.10 or later
+ when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
+
+# If a node fails, halt everything, the admin will need to clean up and we
+# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
+# and will not take any action on a node already running 1.10+.
+- name: Evacuate and upgrade nodes
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ serial: 1
+ any_errors_fatal: true
+ tasks:
+ - debug: var=docker_upgrade
+
+ - name: Prepare for Node evacuation
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+
+# TODO: skip all node evac stuff for non-nodes (i.e. separate containerized etcd hosts)
+ - name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+
+ - name: Stop containerized services
+ service: name={{ item }} state=stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ - etcd_container
+ - openvswitch
+ failed_when: false
+ when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
+
+ - name: Remove all containers and images
+ script: files/nuke_images.sh docker
+ register: nuke_images_result
+ when: docker_upgrade is defined and docker_upgrade | bool
+
+ - name: Upgrade Docker
+ command: "{{ ansible_pkg_mgr }} update -y docker"
+ register: docker_upgrade_result
+ when: docker_upgrade is defined and docker_upgrade | bool
+
+ - name: Restart containerized services
+ service: name={{ item }} state=started
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
+
+ - name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_masters_to_config
+
+ - name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ # Only restore schedulability for nodes that were schedulable before the upgrade
+ when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh
new file mode 100644
index 000000000..9a5ee2276
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Stop any running containers
+running_container_count=`docker ps -q | wc -l`
+if test $running_container_count -gt 0
+then
+ docker stop $(docker ps -q)
+fi
+
+# Delete all containers
+container_count=`docker ps -a -q | wc -l`
+if test $container_count -gt 0
+then
+ docker rm -f -v $(docker ps -a -q)
+fi
+
+# Delete all images (forcefully)
+image_count=`docker images -q | wc -l`
+if test $image_count -gt 0
+then
+ # Taken from: https://gist.github.com/brianclements/f72b2de8e307c7b56689#gistcomment-1443144
+ docker rmi $(docker images | grep "$1/\|/$1 \| $1 \|$1 \|$1-\|$1_" | awk '{print $1 ":" $2}') 2>/dev/null || echo "No images matching \"$1\" left to purge."
+fi
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/roles b/playbooks/byo/openshift-cluster/upgrades/docker/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
new file mode 100644
index 000000000..0f86abd89
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -0,0 +1,29 @@
+# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
+#
+# Currently only supports upgrading 1.9.x to >= 1.10.x.
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
+ changed_when: false
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../cluster_hosts.yml
+
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+ vars:
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- include: docker_upgrade.yml
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index a21aa257f..f093411ef 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -17,7 +17,7 @@
- include: ../common/openshift-cluster/evaluate_groups.yml
-- hosts: all
+- hosts: l_oo_all_hosts
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles: