Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml                |  1
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml               |  7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  | 53
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml          | 91
4 files changed, 67 insertions, 85 deletions
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 143bc37a2..3c4a99887 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -54,6 +54,7 @@
   - set_fact:
       logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
   tasks:
+
   - block:
     - include_role:
         name: openshift_hosted_logging
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 82f18f5e1..d96a78c4c 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -7,7 +7,8 @@
 - name: Update Master configs
   hosts: masters:!oo_first_master
   tasks:
-  - include_role:
-      name: openshift_logging
-      tasks_from: update_master_config
+  - block:
+    - include_role:
+        name: openshift_logging
+        tasks_from: update_master_config
     when: openshift_logging_install_logging | default(false) | bool
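
A note on the block change above: in Ansible, a when: placed on a block: gates every task inside the block, so the include_role is skipped entirely on hosts where the flag evaluates false, rather than attaching the condition to the include itself. A minimal, self-contained sketch of that pattern; the demo_flag variable, debug task, and file name are illustrative stand-ins, not part of the playbook above:

# block_when_sketch.yml -- illustrative only; run with: ansible-playbook block_when_sketch.yml
- name: Demonstrate a when-gated block
  hosts: localhost
  gather_facts: false
  vars:
    demo_flag: false            # stand-in for openshift_logging_install_logging
  tasks:
  - block:
    - debug:
        msg: "only runs when demo_flag is true"
    # the condition below applies to every task inside the block
    when: demo_flag | default(false) | bool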
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 9cad931af..db2c27919 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -229,3 +229,56 @@
   tasks:
   - include: docker/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+- name: Drain and upgrade master nodes
+  hosts: oo_masters_to_config:&oo_nodes_to_upgrade
+  # This var must be set with -e on invocation, as it is not a per-host inventory var
+  # and is evaluated early. Values such as "20%" can also be used.
+  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+  any_errors_fatal: true
+
+  pre_tasks:
+  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+  - name: Determine if node is currently scheduleable
+    command: >
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
+    register: node_output
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    changed_when: false
+
+  - set_fact:
+      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
+
+  - name: Mark node unschedulable
+    command: >
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    # NOTE: There is a transient "object has been modified" error here, allow a couple
+    # retries for a more reliable upgrade.
+    register: node_unsched
+    until: node_unsched.rc == 0
+    retries: 3
+    delay: 1
+
+  - name: Drain Node for Kubelet upgrade
+    command: >
+      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+
+  roles:
+  - openshift_facts
+  - docker
+  - openshift_node_upgrade
+
+  post_tasks:
+  - name: Set node schedulability
+    command: >
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: was_schedulable | bool
+    register: node_sched
+    until: node_sched.rc == 0
+    retries: 3
+    delay: 1
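
A note on the was_schedulable fact set above: the JSON from oc get node only contains an unschedulable key under .spec when the node has been cordoned, so the membership test evaluates true exactly for nodes that were schedulable before the upgrade and should be made schedulable again afterwards. A minimal sketch of that logic; the localhost play, sample node_json, and file name are illustrative assumptions standing in for the real command output:

# was_schedulable_sketch.yml -- illustrative only
- hosts: localhost
  gather_facts: false
  vars:
    # roughly the shape returned by `oc get node <name> -o json` for a cordoned node;
    # a schedulable node carries no "unschedulable" key in .spec at all
    node_json: '{"spec": {"unschedulable": true, "externalID": "node1.example.com"}}'
  tasks:
  - set_fact:
      was_schedulable: "{{ 'unschedulable' not in (node_json | from_json).spec }}"
  - debug:
      var: was_schedulable      # -> false for the cordoned sample above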
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index a6a49e5ff..59188c570 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,16 +1,11 @@
 ---
 - name: Drain and upgrade nodes
-  hosts: oo_nodes_to_upgrade
+  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
   # This var must be set with -e on invocation, as it is not a per-host inventory var
   # and is evaluated early. Values such as "20%" can also be used.
   serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
   any_errors_fatal: true
-  roles:
-  - openshift_facts
-  - docker
-  handlers:
-  - include: ../../../../roles/openshift_node/handlers/main.yml
-    static: yes
+
   pre_tasks:
   # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
   # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
@@ -21,17 +16,14 @@
     register: node_output
     delegate_to: "{{ groups.oo_first_master.0 }}"
     changed_when: false
-    when: inventory_hostname in groups.oo_nodes_to_upgrade
   - set_fact:
       was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-    when: inventory_hostname in groups.oo_nodes_to_upgrade
-  - name: Mark unschedulable if host is a node
+  - name: Mark node unschedulable
     command: >
       {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: inventory_hostname in groups.oo_nodes_to_upgrade
     # NOTE: There is a transient "object has been modified" error here, allow a couple
     # retries for a more reliable upgrade.
     register: node_unsched
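
The register/until/retries/delay combination on the task above is Ansible's standard retry loop: the command is re-run until its return code is zero or the retries run out, which absorbs the transient "object has been modified" error noted in the comment. A minimal standalone sketch of the same pattern, with an assumed flaky shell command standing in for the oc adm call:

# retry_sketch.yml -- illustrative only
- hosts: localhost
  gather_facts: false
  tasks:
  - name: Retry a command until it succeeds
    # fails on the first attempt, succeeds on the second (leaves a marker file in /tmp)
    command: /bin/sh -c 'test -f /tmp/retry_sketch_ok || { touch /tmp/retry_sketch_ok; exit 1; }'
    register: result
    until: result.rc == 0
    retries: 3
    delay: 1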
@@ -43,83 +35,18 @@
     command: >
       {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: inventory_hostname in groups.oo_nodes_to_upgrade
-
-  tasks:
-
-  - include: docker/upgrade.yml
-    vars:
-      # We will restart Docker ourselves after everything is ready:
-      skip_docker_restart: True
-    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
-
-  - include: "{{ node_config_hook }}"
-    when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_upgrade
-
-  - include: rpm_upgrade.yml
-    vars:
-      component: "node"
-      openshift_version: "{{ openshift_pkg_version | default('') }}"
-    when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
-
-  - name: Remove obsolete docker-sdn-ovs.conf
-    file: path=/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf state=absent
-    when: (deployment_type == 'openshift-enterprise' and openshift_release | version_compare('3.4', '>=')) or (deployment_type == 'origin' and openshift_release | version_compare('1.4', '>='))
-
-  - include: containerized_node_upgrade.yml
-    when: inventory_hostname in groups.oo_nodes_to_upgrade and openshift.common.is_containerized | bool
-
-  - name: Ensure containerized services stopped before Docker restart
-    service: name={{ item }} state=stopped
-    with_items:
-    - etcd_container
-    - openvswitch
-    - "{{ openshift.common.service_type }}-master"
-    - "{{ openshift.common.service_type }}-master-api"
-    - "{{ openshift.common.service_type }}-master-controllers"
-    - "{{ openshift.common.service_type }}-node"
-    failed_when: false
-    when: openshift.common.is_containerized | bool
-  - name: Upgrade openvswitch
-    package:
-      name: openvswitch
-      state: latest
-    register: ovs_pkg
-    when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
-
-  - name: Restart openvswitch
-    systemd:
-      name: openvswitch
-      state: restarted
-    when:
-    - inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
-    - ovs_pkg | changed
-
-  # Mandatory Docker restart, ensure all containerized services are running:
-  - include: docker/restart.yml
-
-  - name: Restart rpm node service
-    service: name="{{ openshift.common.service_type }}-node" state=restarted
-    when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
-
-  - name: Wait for node to be ready
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} --no-headers
-    register: node_output
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: inventory_hostname in groups.oo_nodes_to_upgrade
-    until: "{{ node_output.stdout.split()[1].startswith('Ready')}}"
-    # Give the node two minutes to come back online. Note that we pre-pull images now
-    # so containerized services should restart quickly as well.
-    retries: 24
-    delay: 5
+  roles:
+  - openshift_facts
+  - docker
+  - openshift_node_upgrade
+  post_tasks:
   - name: Set node schedulability
     command: >
       {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
+    when: was_schedulable | bool
     register: node_sched
     until: node_sched.rc == 0
     retries: 3
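
The two plays above split the work with Ansible host patterns: oo_masters_to_config:&oo_nodes_to_upgrade matches only hosts in both groups (masters that also run as nodes), while oo_nodes_to_upgrade:!oo_masters_to_config matches the remaining nodes, so no host is drained twice. The batch size comes from the openshift_upgrade_nodes_serial extra var mentioned in the comments. A minimal sketch of both ideas against a toy inventory; the host names, file names, and debug tasks are illustrative assumptions, not from the repository:

# hostpattern_sketch.yml -- illustrative only; run with:
#   ansible-playbook -i hosts.ini hostpattern_sketch.yml -e openshift_upgrade_nodes_serial=20%
#
# hosts.ini (assumed):
#   [oo_masters_to_config]
#   master1
#   [oo_nodes_to_upgrade]
#   master1
#   node1
#   node2
- name: Masters that are also nodes (intersection pattern)
  hosts: oo_masters_to_config:&oo_nodes_to_upgrade      # matches master1 only
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  gather_facts: false
  tasks:
  - debug:
      msg: "control plane host {{ inventory_hostname }} would be drained first"

- name: Nodes that are not masters (exclusion pattern)
  hosts: oo_nodes_to_upgrade:!oo_masters_to_config      # matches node1 and node2
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  gather_facts: false
  tasks:
  - debug:
      msg: "node {{ inventory_hostname }} would be drained in a later batch"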