Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py (renamed from playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py) | 11
-rw-r--r--  playbooks/adhoc/noc/create_host.yml | 58
-rw-r--r--  playbooks/adhoc/noc/create_maintenance.yml | 37
-rw-r--r--  playbooks/adhoc/noc/get_zabbix_problems.yml | 43
-rw-r--r--  playbooks/adhoc/uninstall.yml | 8
-rw-r--r--  playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 60
l---------  playbooks/adhoc/zabbix_setup/filter_plugins | 1
-rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml | 7
-rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-config-zaio.yml | 19
l---------  playbooks/adhoc/zabbix_setup/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 8
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/restart.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 2
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 6
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 12
21 files changed, 66 insertions, 276 deletions
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
index c19274e06..daff68fbe 100644
--- a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
@@ -5,22 +5,11 @@
Custom filters for use in openshift-ansible
'''
-import pdb
-
class FilterModule(object):
''' Custom ansible filters '''
@staticmethod
- def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in
- from the filter.
- Ex: "{{ hostvars | oo_pdb }}"
- '''
- pdb.set_trace()
- return arg
-
- @staticmethod
def translate_volume_name(volumes, target_volume):
'''
This filter matches a device string /dev/sdX to /dev/xvdX
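
The surviving translate_volume_name filter maps a requested block device such as /dev/sdb to the /dev/xvdX name the instance actually exposes. A minimal, hedged sketch of invoking it from a play follows; the cli_volumes structure and the docker_vg_device fact are illustrative assumptions and do not come from this patch.

- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    # Hypothetical volume facts; the real data would come from a cloud
    # inventory or volume lookup, and its exact shape is an assumption here.
    cli_volumes:
      - attachment_set:
          device: /dev/sdb
  tasks:
    - name: Translate /dev/sdX to the /dev/xvdX device seen on the host
      set_fact:
        docker_vg_device: "{{ cli_volumes | translate_volume_name('/dev/sdb') }}"

    - debug:
        var: docker_vg_device
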
diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml
deleted file mode 100644
index 318396bcc..000000000
--- a/playbooks/adhoc/noc/create_host.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-- name: 'Create a host object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Template
- state: list
- params:
- host: ctr_test_kwoodson
- filter:
- host:
- - ctr_kwoodson_test_tmpl
-
- register: tmpl_results
-
- - debug: var=tmpl_results
-
-#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
-- name: 'Create a host object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Host
- state: absent
- params:
- host: ctr_test_kwoodson
- interfaces:
- - type: 1
- main: 1
- useip: 1
- ip: 127.0.0.1
- dns: ""
- port: 10050
- groups:
- - groupid: 1
- templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}"
- output: extend
- filter:
- host:
- - ctr_test_kwoodson
-
- register: host_results
-
- - debug: var=host_results
diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml
deleted file mode 100644
index b694aea1b..000000000
--- a/playbooks/adhoc/noc/create_maintenance.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
-- name: 'Create a maintenace object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- vars:
- oo_hostids: ''
- oo_groupids: ''
- post_tasks:
- - assert:
- that: oo_desc is defined
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Maintenance
- state: present
- params:
- name: "{{ oo_name }}"
- description: "{{ oo_desc }}"
- active_since: "{{ oo_start }}"
- active_till: "{{ oo_stop }}"
- maintenance_type: "0"
- output: extend
- hostids: "{{ oo_hostids.split(',') | default([]) }}"
- #groupids: "{{ oo_groupids.split(',') | default([]) }}"
- timeperiods:
- - start_time: "{{ oo_start }}"
- period: "{{ oo_stop }}"
- selectTimeperiods: extend
-
- register: maintenance
-
- - debug: var=maintenance
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
deleted file mode 100644
index 32fc7ce68..000000000
--- a/playbooks/adhoc/noc/get_zabbix_problems.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: 'Get current hosts who have triggers that are alerting by trigger description'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
- - assert:
- that: oo_desc is defined
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Trigger
- state: list
- params:
- only_true: true
- output: extend
- selectHosts: extend
- searchWildCardsEnabled: 1
- search:
- description: "{{ oo_desc }}"
- register: problems
-
- - debug: var=problems
-
- - set_fact:
- problem_hosts: "{{ problems.results | oo_collect(attribute='hosts') | oo_flatten | oo_collect(attribute='host') | difference(['aggregates']) }}"
-
- - debug: var=problem_hosts
-
- - add_host:
- name: "{{ item }}"
- groups: problem_hosts_group
- with_items: "{{ problem_hosts }}"
-
-- name: "Run on problem hosts"
- hosts: problem_hosts_group
- gather_facts: no
- tasks:
- - command: "{{ oo_cmd }}"
- when: oo_cmd is defined
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index bdd92a47d..b9966e715 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -92,6 +92,8 @@
- atomic-enterprise-sdn-ovs
- atomic-openshift
- atomic-openshift-clients
+ - atomic-openshift-excluder
+ - atomic-openshift-docker-excluder
- atomic-openshift-node
- atomic-openshift-sdn-ovs
- cockpit-bridge
@@ -105,6 +107,8 @@
- openshift-sdn-ovs
- openvswitch
- origin
+ - origin-excluder
+ - origin-docker-excluder
- origin-clients
- origin-node
- origin-sdn-ovs
@@ -254,6 +258,8 @@
- atomic-enterprise-master
- atomic-openshift
- atomic-openshift-clients
+ - atomic-openshift-excluder
+ - atomic-openshift-docker-excluder
- atomic-openshift-master
- cockpit-bridge
- cockpit-docker
@@ -265,6 +271,8 @@
- openshift-master
- origin
- origin-clients
+ - origin-excluder
+ - origin-docker-excluder
- origin-master
- pacemaker
- pcs
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
deleted file mode 100644
index 955f990b7..000000000
--- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars:
- g_server: http://localhost:8080/zabbix/api_jsonrpc.php
- g_user: ''
- g_password: ''
-
- roles:
- - lib_zabbix
-
- post_tasks:
- - name: CLEAN List template for heartbeat
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template Heartbeat'
- register: templ_heartbeat
-
- - name: CLEAN List template app zabbix server
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template App Zabbix Server'
- register: templ_zabbix_server
-
- - name: CLEAN List template app zabbix server
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template App Zabbix Agent'
- register: templ_zabbix_agent
-
- - name: CLEAN List all templates
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- register: templates
-
- - debug: var=templ_heartbeat.results
-
- - name: Remove templates if heartbeat template is missing
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- name: "{{ item }}"
- state: absent
- with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
- when: templ_heartbeat.results | length == 0
diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins
deleted file mode 120000
index b0b7a3414..000000000
--- a/playbooks/adhoc/zabbix_setup/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
deleted file mode 100755
index 0fe65b338..000000000
--- a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-- include: clean_zabbix.yml
- vars:
- g_server: http://localhost/zabbix/api_jsonrpc.php
- g_user: Admin
- g_password: zabbix
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
deleted file mode 100755
index 0d5e01878..000000000
--- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/ansible-playbook
----
-- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars:
- g_server: http://localhost/zabbix/api_jsonrpc.php
- g_user: Admin
- g_password: zabbix
- g_zbx_scriptrunner_user: scriptrunner
- g_zbx_scriptrunner_bastion_host: specialhost.example.com
- roles:
- - role: os_zabbix
- ozb_server: "{{ g_server }}"
- ozb_user: "{{ g_user }}"
- ozb_password: "{{ g_password }}"
- ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}"
- ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}"
diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/adhoc/zabbix_setup/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 0d451cf77..1e0a6d4e7 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -18,20 +18,20 @@
# If a node fails, halt everything, the admin will need to clean up and we
# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
# and will not take any action on a node already running the requested docker version.
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
tasks:
- - name: Prepare for Node evacuation
+ - name: Prepare for Node draining
command: >
{{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --drain --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
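
For context, the companion step that usually follows the upgrade is to make the node schedulable again; a hedged sketch in the same style as the tasks above (the task itself is not part of this hunk and its exact placement is an assumption):

  - name: Set node schedulability back on (illustrative sketch, not in this hunk)
    command: >
      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
    delegate_to: "{{ groups.oo_first_master.0 }}"
    when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
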
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 496b00697..d6115e7a5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -89,6 +89,8 @@
- include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
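
The new master_config_hook variable is consumed by upgrade_control_plane.yml (see the `- include: "{{ master_config_hook }}"` line in its hunk further down), letting each versioned playbook inject its own master config migration. A minimal sketch of the pattern with hypothetical file names:

# caller.yml (sketch): pass the version-specific hook into the shared playbook.
- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
  vars:
    master_config_hook: "v3_4/master_config_upgrade.yml"

# upgrade_control_plane.yml (sketch): run whatever hook the caller provided.
# The 'is defined' guard is an assumption, added so the sketch also works for
# callers that set no hook.
- include: "{{ master_config_hook }}"
  when: master_config_hook is defined
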
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 5f008a045..5fc81bf3a 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -204,7 +204,7 @@
cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
changed_when: False
-- name: Serially evacuate all nodes to trigger redeployments
+- name: Serially drain all nodes to trigger redeployments
hosts: oo_nodes_to_config
serial: 1
any_errors_fatal: true
@@ -222,7 +222,7 @@
was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
when: openshift_certificates_redeploy_ca | default(false) | bool
- - name: Prepare for node evacuation
+ - name: Prepare for node draining
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
@@ -230,11 +230,11 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
- - name: Evacuate node
+ - name: Drain node
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
- --evacuate --force
+ --drain --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
index d800b289b..1b418920f 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
@@ -19,11 +19,9 @@
when: openshift.common.is_containerized | bool
- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
when: inventory_hostname in groups.oo_masters_to_config
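
The rewritten task now runs wait_for on the master host itself rather than as a local_action with become: no. If a check from the control machine were still desired, the same module can simply be delegated; an illustrative alternative, not part of this patch:

  - name: Wait for master API from the control host (illustrative alternative)
    wait_for:
      host: "{{ openshift.common.hostname }}"
      port: "{{ openshift.master.api_port }}"
      state: started
      delay: 10
    delegate_to: localhost
    become: no
    when: inventory_hostname in groups.oo_masters_to_config
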
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 0a972adf6..be42f005f 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -42,15 +42,28 @@
{{ avail_disk.stdout }} Kb available.
when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
- # TODO - Refactor containerized backup to use etcd_container to backup the data so we don't rely on
- # the host's etcdctl binary which may be of a different version.
-
- # for non containerized and non embedded we should have the correct version of etcd installed already
- # For embedded we need to use the latest because OCP 3.3 uses a version of etcd that can only be backed
- # up with etcd-3.x
+ # For non containerized and non embedded we should have the correct version of
+ # etcd installed already. So don't do anything.
+ #
+ # For embedded or containerized we need to use the latest because OCP 3.3 uses
+ # a version of etcd that can only be backed up with etcd-3.x and if it's
+ # containerized then etcd version may be newer than that on the host so
+ # upgrade it.
+ #
+ # On atomic we have neither yum nor dnf so ansible throws a hard to debug error
+ # if you use package there, like this: "Could not find a module for unknown."
+ # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668
+ #
+ # TODO - We should refactor all containerized backups to use the containerized
+ # version of etcd to perform the backup rather than relying on the host's
+ # binaries. Until we do that we'll continue to have problems backing up etcd
+ # when atomic host has an older version than the version that's running in the
+ # container whether that's embedded or not
- name: Install latest etcd for containerized or embedded
- package: name=etcd state=latest
- when: ( openshift.common.is_containerized and not openshift.common.is_atomic ) or embedded_etcd | bool
+ package:
+ name: etcd
+ state: latest
+ when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic
- name: Generate etcd backup
command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
index f88981a0b..5f8b59e17 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
@@ -8,8 +8,7 @@
- name: Set new_etcd_image
set_fact:
- new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd3:' ~ upgrade_version ) if upgrade_version | version_compare('3.0','>=')
- else current_image.stdout.split(':')[0] ~ ':' ~ upgrade_version }}"
+ new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}"
- name: Pull new etcd image
command: "docker pull {{ new_etcd_image }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 5ff9521ec..0f8d94737 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -75,7 +75,7 @@
hosts: etcd_hosts_to_upgrade
serial: 1
vars:
- upgrade_version: 3.0.14
+ upgrade_version: 3.0.15
tasks:
- include: containerized_tasks.yml
when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index 1238acb05..673f11889 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -146,7 +146,7 @@ def main():
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
- except Exception, e:
+ except Exception as e:
return module.fail_json(msg=str(e))
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 474e6311e..6950b6166 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -51,6 +51,14 @@
- include: create_service_signer_cert.yml
+# Set openshift_master_facts separately. In order to reconcile
+# admission_config's, we currently must run openshift_master_facts and
+# then run openshift_facts.
+- name: Set OpenShift master facts
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_master_facts
+
- name: Upgrade master config and systemd units
hosts: oo_masters_to_config
handlers:
@@ -58,8 +66,9 @@
static: yes
roles:
- openshift_facts
- - openshift_master_facts
- tasks:
+ post_tasks:
+ - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
+
- include: upgrade_scheduler.yml
- include: "{{ master_config_hook }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index cefc7d12b..68b111df4 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,5 +1,5 @@
---
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
@@ -39,9 +39,9 @@
retries: 3
delay: 1
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --drain --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 25fa10450..b40c32669 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -10,13 +10,11 @@
state: restarted
when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ openshift.common.hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
- name: Restart master controllers
service: