Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml                6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml  10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml        2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml       10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml               6
-rw-r--r--  playbooks/common/openshift-master/restart.yml                               74
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml                         26
-rw-r--r--  playbooks/common/openshift-master/restart_hosts_pacemaker.yml               25
-rw-r--r--  playbooks/common/openshift-master/restart_services_pacemaker.yml            10
9 files changed, 18 insertions, 151 deletions
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 4996c56a7..5f008a045 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -224,7 +224,7 @@
- name: Prepare for node evacuation
command: >
- {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+ {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
--schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -232,7 +232,7 @@
- name: Evacuate node
command: >
- {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+ {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
--evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -240,7 +240,7 @@
- name: Set node schedulability
command: >
- {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+ {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
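This file swaps the dedicated admin wrapper for the unified client plus the `adm` subcommand. A minimal, self-contained sketch of the resulting pattern, assuming the `oc` client is on the PATH; the node name is an illustrative placeholder:

- name: Mark a node unschedulable with the unified client (sketch)
  hosts: masters
  tasks:
    - name: Disable scheduling on a node (node name is illustrative)
      command: >
        oc adm --config=/etc/origin/master/admin.kubeconfig
        manage-node node01.example.com --schedulable=false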
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 78f6c46f3..23cf8cf76 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -22,11 +22,11 @@
- name: Create service signer certificate
command: >
- {{ openshift.common.admin_binary }} ca create-signer-cert
- --cert=service-signer.crt
- --key=service-signer.key
- --name=openshift-service-serving-signer
- --serial=service-signer.serial.txt
+ {{ openshift.common.client_binary }} adm ca create-signer-cert
+ --cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt
+ --key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key
+ --name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer
+ --serial="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.serial.txt
args:
chdir: "{{ remote_cert_create_tmpdir.stdout }}/"
when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
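For context, in `oc adm ca create-signer-cert` the `--cert`, `--key`, and `--serial` flags take file paths, while `--name` is the signer certificate's CN. A hedged sketch of the bare invocation (paths and CN are illustrative, not taken from this diff):

- name: Create a service signer certificate (sketch)
  command: >
    oc adm ca create-signer-cert
    --cert=/tmp/signer/service-signer.crt
    --key=/tmp/signer/service-signer.key
    --name=openshift-service-serving-signer
    --serial=/tmp/signer/service-signer.serial.txt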
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 46ff421fd..ee75aa853 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -36,7 +36,7 @@
- set_fact:
l_docker_upgrade: False
-# Make sure a docker_verison is set if none was requested:
+# Make sure a docker_version is set if none was requested:
- set_fact:
docker_version: "{{ avail_docker_version.stdout }}"
when: pkg_check.rc == 0 and docker_version is not defined
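The fallback above assumes an earlier task registered avail_docker_version; a minimal sketch of that pairing (the repoquery call is an assumption, not shown in this diff):

- name: Check latest available docker version (sketch, assumed query)
  command: repoquery --qf '%{version}' docker
  register: avail_docker_version
  failed_when: false
  changed_when: false

- name: Default docker_version when none was requested (sketch)
  set_fact:
    docker_version: "{{ avail_docker_version.stdout }}"
  when: docker_version is not defined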
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 947f865d3..b3f4d7d1a 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -200,19 +200,15 @@
# restart.
skip_docker_role: True
tasks:
- - name: Verifying the correct commandline tools are available
- shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
- when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
-
- name: Reconcile Cluster Roles
command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-role-bindings
--exclude-groups=system:authenticated
--exclude-groups=system:authenticated:oauth
@@ -224,7 +220,7 @@
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true
+ {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true
run_once: true
- set_fact:
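All three reconcile tasks now share one shape: the unified client, an explicit admin kubeconfig, then an `oc adm policy reconcile-*` subcommand. A standalone sketch of one of them, using the kubeconfig path from the diff:

- name: Reconcile cluster roles (sketch)
  command: >
    oc adm --config=/etc/origin/master/admin.kubeconfig
    policy reconcile-cluster-roles --additive-only=true --confirm
  run_once: true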
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 9b572dcdf..1f314c854 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -29,7 +29,7 @@
- name: Mark unschedulable if host is a node
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
# NOTE: There is a transient "object has been modified" error here, allow a couple
@@ -41,7 +41,7 @@
- name: Evacuate Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
tasks:
@@ -64,7 +64,7 @@
- name: Set node schedulability
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
register: node_sched
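The transient "object has been modified" error noted in the context above is conventionally absorbed with an until/retries loop; a hedged sketch of that pattern (retry counts and node name are illustrative):

- name: Mark node unschedulable, retrying transient conflicts (sketch)
  command: >
    oc adm manage-node node01.example.com --schedulable=false
  register: node_unschedulable
  until: node_unschedulable.rc == 0
  retries: 3
  delay: 5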
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 57a63cfee..5769ef5cd 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -66,63 +66,8 @@
current_host: "{{ exists.stat.exists }}"
when: openshift.common.rolling_restart_mode == 'system'
-- name: Determine which masters are currently active
- hosts: oo_masters_to_config
- any_errors_fatal: true
- tasks:
- - name: Check master service status
- command: >
- systemctl is-active {{ openshift.common.service_type }}-master
- register: active_check_output
- when: openshift.master.cluster_method | default(None) == 'pacemaker'
- failed_when: false
- changed_when: false
- - set_fact:
- is_active: "{{ active_check_output.stdout == 'active' }}"
- when: openshift.master.cluster_method | default(None) == 'pacemaker'
-
-- name: Evaluate master groups
- hosts: localhost
- become: no
- tasks:
- - fail:
- msg: >
- Did not receive active status from any masters. Please verify pacemaker cluster.
- when: "{{ hostvars[groups.oo_first_master.0].openshift.master.cluster_method | default(None) == 'pacemaker' and 'True' not in (hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('is_active')
- | list) }}"
- - name: Evaluate oo_active_masters
- add_host:
- name: "{{ item }}"
- groups: oo_active_masters
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_masters_to_config | default([]) }}"
- when: (hostvars[item]['is_active'] | default(false)) | bool
- - name: Evaluate oo_current_masters
- add_host:
- name: "{{ item }}"
- groups: oo_current_masters
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_masters_to_config | default([]) }}"
- when: (hostvars[item]['current_host'] | default(false)) | bool
-
-- name: Validate pacemaker cluster
- hosts: oo_active_masters
- tasks:
- - name: Retrieve pcs status
- command: pcs status
- register: pcs_status_output
- changed_when: false
- - fail:
- msg: >
- Pacemaker cluster validation failed. One or more nodes are not online.
- when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool
-
- name: Restart masters
- hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters
+ hosts: oo_masters_to_config
vars:
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
@@ -132,20 +77,3 @@
- include: restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
-- name: Restart active masters
- hosts: oo_active_masters
- serial: 1
- tasks:
- - include: restart_hosts_pacemaker.yml
- when: openshift.common.rolling_restart_mode == 'system'
- - include: restart_services_pacemaker.yml
- when: openshift.common.rolling_restart_mode == 'services'
-
-- name: Restart current masters
- hosts: oo_current_masters
- serial: 1
- tasks:
- - include: restart_hosts.yml
- when: openshift.common.rolling_restart_mode == 'system'
- - include: restart_services.yml
- when: openshift.common.rolling_restart_mode == 'services'
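With the pacemaker plays removed, the whole rolling restart collapses into a single serialized play; its effective shape, reassembled from the surviving lines of this diff:

- name: Restart masters
  hosts: oo_masters_to_config
  vars:
    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
  serial: 1
  tasks:
    - include: restart_hosts.yml
      when: openshift.common.rolling_restart_mode == 'system'
    - include: restart_services.yml
      when: openshift.common.rolling_restart_mode == 'services'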
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index ff206f5a2..b1c36718c 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -5,8 +5,8 @@
poll: 0
ignore_errors: true
become: yes
-# When cluster_method != pacemaker we can ensure the api_port is
-# available.
+
+# Ensure the api_port is available.
- name: Wait for master API to come back online
become: no
local_action:
@@ -15,25 +15,3 @@
state=started
delay=10
port="{{ openshift.master.api_port }}"
- when: openshift.master.cluster_method != 'pacemaker'
-- name: Wait for master to start
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
- port=22
- when: openshift.master.cluster_method == 'pacemaker'
-- name: Wait for master to become available
- command: pcs status
- register: pcs_status_output
- until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool
- retries: 15
- delay: 2
- changed_when: false
- when: openshift.master.cluster_method == 'pacemaker'
-- fail:
- msg: >
- Pacemaker cluster validation failed {{ inventory hostname }} is not online.
- when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool
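The surviving wait now runs unconditionally. The same task in the dict form of local_action, which reads more clearly than the inline k=v style (the port value here is illustrative; the playbook takes it from openshift.master.api_port):

- name: Wait for master API to come back online (sketch)
  become: no
  local_action:
    module: wait_for
    host: "{{ inventory_hostname }}"
    state: started
    delay: 10
    port: 8443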
diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
deleted file mode 100644
index c9219e8de..000000000
--- a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-- name: Fail over master resource
- command: >
- pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }}
-- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ openshift.master.cluster_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
-- name: Restart master system
- # https://github.com/ansible/ansible/issues/10616
- shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
- async: 1
- poll: 0
- ignore_errors: true
- become: yes
-- name: Wait for master to start
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml
deleted file mode 100644
index e738f3fb6..000000000
--- a/playbooks/common/openshift-master/restart_services_pacemaker.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-- name: Restart master services
- command: pcs resource restart master
-- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ openshift.master.cluster_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"