Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 30
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 27
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml | 17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/restart.yml | 27
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml | 31
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 26
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 109
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 94
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check | 193
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 2
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 39
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml | 166
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/validate_hostnames.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/verify_ansible_version.yml | 11
-rw-r--r--  playbooks/common/openshift-etcd/service.yml | 2
-rw-r--r--  playbooks/common/openshift-loadbalancer/service.yml | 2
-rw-r--r--  playbooks/common/openshift-master/config.yml | 47
-rw-r--r--  playbooks/common/openshift-master/restart.yml | 13
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml | 3
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 13
-rw-r--r--  playbooks/common/openshift-master/service.yml | 2
-rw-r--r--  playbooks/common/openshift-nfs/service.yml | 2
-rw-r--r--  playbooks/common/openshift-node/config.yml | 48
-rw-r--r--  playbooks/common/openshift-node/service.yml | 2
39 files changed, 459 insertions, 535 deletions
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index 825f46415..c0ea93d2c 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -1,3 +1,4 @@
+---
- name: Additional master configuration
hosts: oo_first_master
vars:
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 801c8065d..0f226f5f9 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -12,6 +12,8 @@
- node
- include: initialize_openshift_version.yml
+ tags:
+ - always
- name: Set oo_option facts
hosts: oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 4cfe8617e..ca5177852 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -59,7 +59,7 @@
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
- - openshift_node_dnsmasq
+ - openshift_node_dnsmasq
post_tasks:
- modify_yaml:
dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index b3e02fb97..45a4875a3 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -7,27 +7,27 @@
tasks:
- fail:
msg: This playbook requires g_etcd_hosts to be set
- when: g_etcd_hosts is not defined
+ when: "{{ g_etcd_hosts is not defined }}"
- fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: g_master_hosts is not defined and g_new_master_hosts is not defined
+ when: "{{ g_master_hosts is not defined and g_new_master_hosts is not defined }}"
- fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: g_node_hosts is not defined and g_new_node_hosts is not defined
+ when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}"
- fail:
msg: This playbook requires g_lb_hosts to be set
- when: g_lb_hosts is not defined
+ when: "{{ g_lb_hosts is not defined }}"
- fail:
msg: This playbook requires g_nfs_hosts to be set
- when: g_nfs_hosts is not defined
+ when: "{{ g_nfs_hosts is not defined }}"
- fail:
msg: The nfs group must be limited to one host
- when: (groups[g_nfs_hosts] | default([])) | length > 1
+ when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}"
- name: Evaluate oo_all_hosts
add_host:
@@ -36,6 +36,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_all_hosts | default([]) }}"
+ changed_when: no
- name: Evaluate oo_masters
add_host:
@@ -44,6 +45,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
+ changed_when: no
- name: Evaluate oo_etcd_to_config
add_host:
@@ -52,6 +54,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_etcd_hosts | default([]) }}"
+ changed_when: no
- name: Evaluate oo_masters_to_config
add_host:
@@ -60,6 +63,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
+ changed_when: no
- name: Evaluate oo_nodes_to_config
add_host:
@@ -68,23 +72,26 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
+ changed_when: no
# Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
- - name: Evaluate oo_nodes_to_config
+ - name: Add master to oo_nodes_to_config
add_host:
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | default([]) }}"
- when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
+ when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}"
+ changed_when: no
- name: Evaluate oo_first_etcd
add_host:
name: "{{ g_etcd_hosts[0] }}"
groups: oo_first_etcd
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- when: g_etcd_hosts|length > 0
+ when: "{{ g_etcd_hosts|length > 0 }}"
+ changed_when: no
- name: Evaluate oo_first_master
add_host:
@@ -92,7 +99,8 @@
groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- when: g_master_hosts|length > 0
+ when: "{{ g_master_hosts|length > 0 }}"
+ changed_when: no
- name: Evaluate oo_lb_to_config
add_host:
@@ -101,6 +109,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_lb_hosts | default([]) }}"
+ changed_when: no
- name: Evaluate oo_nfs_to_config
add_host:
@@ -109,3 +118,4 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_nfs_hosts | default([]) }}"
+ changed_when: no
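For reference, after this change every group-evaluation task in evaluate_groups.yml follows the same shape, reconstructed here from the hunks above; marking the in-memory add_host updates with changed_when: no stops them from being reported as changes on every run:

    - name: Evaluate oo_all_hosts
      add_host:
        name: "{{ item }}"
        groups: oo_all_hosts
        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
        ansible_become: "{{ g_sudo | default(omit) }}"
      with_items: "{{ g_all_hosts | default([]) }}"
      changed_when: no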
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index ccbba54b4..ec5b18389 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -20,35 +20,14 @@
openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- set_fact:
- logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}"
logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
- - role: openshift_cli
- - role: openshift_hosted_facts
- - role: openshift_projects
- # TODO: Move standard project definitions to openshift_hosted/vars/main.yml
- # Vars are not accessible in meta/main.yml in ansible-1.9.x
- openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}"
- - role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - hostnetwork
- when: openshift.common.version_gte_3_2_or_1_2
- - role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- - registry
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - privileged
- when: not openshift.common.version_gte_3_2_or_1_2
- role: openshift_hosted
- - role: openshift_metrics
+ - role: openshift_hosted_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
- role: openshift_hosted_logging
when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 5f008a045..6e3e04a6b 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -204,7 +204,7 @@
cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
changed_when: False
-- name: Serially evacuate all nodes to trigger redeployments
+- name: Serially drain all nodes to trigger redeployments
hosts: oo_nodes_to_config
serial: 1
any_errors_fatal: true
@@ -222,7 +222,7 @@
was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
when: openshift_certificates_redeploy_ca | default(false) | bool
- - name: Prepare for node evacuation
+ - name: Prepare for node draining
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
@@ -230,11 +230,11 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
- - name: Evacuate node
+ - name: Drain node
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
- --evacuate --force
+ {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
index 439df5ffd..9f7961614 100644
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
@@ -1,9 +1,14 @@
+---
+# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
+# restart services. We will unconditionally restart all containerized services
+# because we have to unconditionally restart Docker:
+- set_fact:
+ skip_node_svc_handlers: True
+
- name: Update systemd units
include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
-- name: Verifying the correct version was configured
- shell: grep {{ verify_upgrade_version }} {{ item }}
- with_items:
- - /etc/sysconfig/openvswitch
- - /etc/sysconfig/{{ openshift.common.service_type }}*
- when: verify_upgrade_version is defined
+# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
+# play when the node has already been marked schedulable again. (this would look strange
+# in logs otherwise)
+- meta: flush_handlers
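The handler skip depends on the openshift_node role checking the skip_node_svc_handlers flag; the role itself is outside this diff, but the guard presumably looks roughly like this sketch:

    # Hypothetical handler guard in the openshift_node role
    # (assumed shape; the actual handler is not shown in this diff):
    - name: restart node
      service:
        name: "{{ openshift.common.service_type }}-node"
        state: restarted
      when: not (skip_node_svc_handlers | default(false) | bool)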
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
new file mode 100644
index 000000000..1b418920f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
@@ -0,0 +1,27 @@
+---
+- name: Restart docker
+ service: name=docker state=restarted
+
+- name: Update docker facts
+ openshift_facts:
+ role: docker
+
+- name: Restart containerized services
+ service: name={{ item }} state=started
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- name: Wait for master API to come back online
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
+ when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 5d753447c..17f8fc6e9 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -20,7 +20,7 @@
- debug: var=docker_image_count.stdout
- name: Remove all containers and images
- script: nuke_images.sh docker
+ script: nuke_images.sh
register: nuke_images_result
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
@@ -37,30 +37,5 @@
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
-- service: name=docker state=started
-
-- name: Update docker facts
- openshift_facts:
- role: docker
-
-- name: Restart containerized services
- service: name={{ item }} state=started
- with_items:
- - etcd_container
- - openvswitch
- - "{{ openshift.common.service_type }}-master"
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- failed_when: false
- when: openshift.common.is_containerized | bool
-
-- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
- when: inventory_hostname in groups.oo_masters_to_config
+- include: restart.yml
+ when: not skip_docker_restart | default(False) | bool
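Callers that need to control restart ordering themselves can now include the upgrade with the restart suppressed and trigger restart.yml explicitly once services are stopped, as upgrade_nodes.yml does further down:

    - include: docker/upgrade.yml
      vars:
        skip_docker_restart: True
    # ... stop containerized services, then:
    - include: docker/restart.yml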
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index ee75aa853..e3379f29b 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -9,6 +9,8 @@
- name: Check if Docker is installed
command: rpm -q docker
+ args:
+ warn: no
register: pkg_check
failed_when: pkg_check.rc > 1
changed_when: no
@@ -48,6 +50,5 @@
- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
set_fact:
- docker_upgrade_nuke_images: True
+ docker_upgrade_nuke_images: True
when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
-
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 727b24234..d0eadf1fc 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -1,3 +1,4 @@
+---
- name: Backup etcd
hosts: etcd_hosts_to_backup
vars:
@@ -42,9 +43,28 @@
{{ avail_disk.stdout }} Kb available.
when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
- - name: Install etcd (for etcdctl)
- package: name=etcd state=present
- when: not openshift.common.is_atomic | bool
+ # For non containerized and non embedded we should have the correct version of
+ # etcd installed already. So don't do anything.
+ #
+ # For embedded or containerized we need to use the latest because OCP 3.3 uses
+ # a version of etcd that can only be backed up with etcd-3.x and if it's
+ # containerized then etcd version may be newer than that on the host so
+ # upgrade it.
+ #
+ # On atomic we have neither yum nor dnf so ansible throws a hard to debug error
+ # if you use package there, like this: "Could not find a module for unknown."
+ # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668
+ #
+ # TODO - We should refactor all containerized backups to use the containerized
+ # version of etcd to perform the backup rather than relying on the host's
+ # binaries. Until we do that we'll continue to have problems backing up etcd
+ # when atomic host has an older version than the version that's running in the
+ # container whether that's embedded or not
+ - name: Install latest etcd for containerized or embedded
+ package:
+ name: etcd
+ state: latest
+ when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic
- name: Generate etcd backup
command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
index f88981a0b..5f8b59e17 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
@@ -8,8 +8,7 @@
- name: Set new_etcd_image
set_fact:
- new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd3:' ~ upgrade_version ) if upgrade_version | version_compare('3.0','>=')
- else current_image.stdout.split(':')[0] ~ ':' ~ upgrade_version }}"
+ new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}"
- name: Pull new etcd image
command: "docker pull {{ new_etcd_image }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index 192799376..8268adc2e 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -9,117 +9,36 @@
tags:
- always
-- name: Evaluate additional groups for upgrade
+# We use two groups one for hosts we're upgrading which doesn't include embedded etcd
+# The other for backing up which includes the embedded etcd host, there's no need to
+# upgrade embedded etcd that just happens when the master is updated.
+- name: Evaluate additional groups for etcd
hosts: localhost
connection: local
become: no
tasks:
- - fail:
- msg: 'The etcd upgrade playbook does not support upgrading embedded etcd, simply run the normal playbooks and etcd will be upgraded when your master is updated.'
- when: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- name: Evaluate etcd_hosts_to_upgrade
add_host:
name: "{{ item }}"
- groups: etcd_hosts_to_upgrade, etcd_hosts_to_backup
+ groups: etcd_hosts_to_upgrade
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- name: Backup etcd before upgrading anything
include: backup.yml
vars:
backup_tag: "pre-upgrade-"
+ when: openshift_etcd_backup | default(true) | bool
- name: Drop etcdctl profiles
hosts: etcd_hosts_to_upgrade
tasks:
- include: roles/etcd/tasks/etcdctl.yml
-- name: Determine etcd version
- hosts: etcd_hosts_to_upgrade
- tasks:
- - name: Record RPM based etcd version
- command: rpm -qa --qf '%{version}' etcd\*
- register: etcd_installed_version
- failed_when: false
- when: not openshift.common.is_containerized | bool
- - name: Record containerized etcd version
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_installed_version
- failed_when: false
- when: openshift.common.is_containerized | bool
-
-# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop
-# through hosts, then loop through tasks only when appropriate
-- name: Upgrade to 2.1
- hosts: etcd_hosts_to_upgrade
- serial: 1
- vars:
- upgrade_version: '2.1'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_installed_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
-
-- name: Upgrade RPM hosts to 2.2
- hosts: etcd_hosts_to_upgrade
- serial: 1
- vars:
- upgrade_version: '2.2'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
-
-- name: Upgrade containerized hosts to 2.2.5
- hosts: etcd_hosts_to_upgrade
- serial: 1
- vars:
- upgrade_version: 2.2.5
- tasks:
- - include: containerized_tasks.yml
- when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
-
-- name: Upgrade RPM hosts to 2.3
- hosts: etcd_hosts_to_upgrade
- serial: 1
- vars:
- upgrade_version: '2.3'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
-
-- name: Upgrade containerized hosts to 2.3.7
- hosts: etcd_hosts_to_upgrade
- serial: 1
- vars:
- upgrade_version: 2.3.7
- tasks:
- - include: containerized_tasks.yml
- when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
-
-- name: Upgrade RPM hosts to 3.0
- hosts: etcd_hosts_to_upgrade
- serial: 1
- vars:
- upgrade_version: '3.0'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
-
-- name: Upgrade containerized hosts to etcd3 image
- hosts: etcd_hosts_to_upgrade
- serial: 1
- vars:
- upgrade_version: 3.0.3
- tasks:
- - include: containerized_tasks.yml
- when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
-
-- name: Upgrade fedora to latest
- hosts: etcd_hosts_to_upgrade
- serial: 1
- tasks:
- - include: fedora_tasks.yml
- when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool
-
-- name: Backup etcd
- include: backup.yml
- vars:
- backup_tag: "post-3.0-"
+- name: Perform etcd upgrade
+ include: ./upgrade.yml
+ when: openshift_etcd_upgrade | default(true) | bool
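Because both phases are now gated on variables that default to true, either one can be disabled at invocation time; a hypothetical run through whichever entry-point playbook includes this file might pass:

    ansible-playbook <entry-point-playbook>.yml \
        -e openshift_etcd_backup=true \
        -e openshift_etcd_upgrade=false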
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
index 8e7dc9d9b..3a972e8ab 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
@@ -2,13 +2,10 @@
- name: Verify cluster is healthy pre-upgrade
command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
-- name: Update etcd package but exclude etcd3
- command: "{{ ansible_pkg_mgr }} install -y etcd-{{ upgrade_version }}\\* --exclude etcd3"
- when: upgrade_version | version_compare('3.0','<')
-
-- name: Update etcd package not excluding etcd3
- command: "{{ ansible_pkg_mgr }} install -y etcd3-{{ upgrade_version }}\\*"
- when: not upgrade_version | version_compare('3.0','<')
+- name: Update etcd RPM
+ package:
+ name: etcd-{{ upgrade_version }}*
+ state: latest
- name: Restart etcd
service:
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
new file mode 100644
index 000000000..0f8d94737
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -0,0 +1,94 @@
+---
+- name: Determine etcd version
+ hosts: etcd_hosts_to_upgrade
+ tasks:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ args:
+ warn: no
+ register: etcd_rpm_version
+ failed_when: false
+ when: not openshift.common.is_containerized | bool
+ - name: Record containerized etcd version
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop
+# through hosts, then loop through tasks only when appropriate
+- name: Upgrade to 2.1
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.1'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 2.2
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.2'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to 2.2.5
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 2.2.5
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 2.3
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.3'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to 2.3.7
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 2.3.7
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 3.0
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '3.0'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to etcd3 image
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 3.0.15
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade fedora to latest
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ tasks:
+ - include: fedora_tasks.yml
+ when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool
+
+- name: Backup etcd
+ include: backup.yml
+ vars:
+ backup_tag: "post-3.0-"
+ when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
deleted file mode 100644
index e5c958ebb..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env python
-"""
-Pre-upgrade checks that must be run on a master before proceeding with upgrade.
-"""
-# This is a script not a python module:
-# pylint: disable=invalid-name
-
-# NOTE: This script should not require any python libs other than what is
-# in the standard library.
-
-__license__ = "ASL 2.0"
-
-import json
-import os
-import subprocess
-import re
-
-# The maximum length of container.ports.name
-ALLOWED_LENGTH = 15
-# The valid structure of container.ports.name
-ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$')
-AT_LEAST_ONE_LETTER = re.compile('[a-z]')
-# look at OS_PATH for the full path. Default ot 'oc'
-OC_PATH = os.getenv('OC_PATH', 'oc')
-
-
-def validate(value):
- """
- validate verifies that value matches required conventions
-
- Rules of container.ports.name validation:
-
- * must be less that 16 chars
- * at least one letter
- * only a-z0-9-
- * hyphens can not be leading or trailing or next to each other
-
- :Parameters:
- - `value`: Value to validate
- """
- if len(value) > ALLOWED_LENGTH:
- return False
-
- if '--' in value:
- return False
-
- # We search since it can be anywhere
- if not AT_LEAST_ONE_LETTER.search(value):
- return False
-
- # We match because it must start at the beginning
- if not ALLOWED_CHARS.match(value):
- return False
- return True
-
-
-def list_items(kind):
- """
- list_items returns a list of items from the api
-
- :Parameters:
- - `kind`: Kind of item to access
- """
- response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind])
- items = json.loads(response)
- return items.get("items", [])
-
-
-def get(obj, *paths):
- """
- Gets an object
-
- :Parameters:
- - `obj`: A dictionary structure
- - `path`: All other non-keyword arguments
- """
- ret_obj = obj
- for path in paths:
- if ret_obj.get(path, None) is None:
- return []
- ret_obj = ret_obj[path]
- return ret_obj
-
-
-# pylint: disable=too-many-arguments
-def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid):
- """
- Prints out results in human friendly way.
-
- :Parameters:
- - `namespace`: Namespace of the resource
- - `kind`: Kind of the resource
- - `item_name`: Name of the resource
- - `container_name`: Name of the container. May be "" when kind=Service.
- - `port_name`: Name of the port
- - `invalid_label`: The label of the invalid port. Port.name/targetPort
- - `valid`: True if the port is valid
- """
- if not valid:
- if len(container_name) > 0:
- print('%s/%s -n %s (Container="%s" %s="%s")' % (
- kind, item_name, namespace, container_name, invalid_label, port_name))
- else:
- print('%s/%s -n %s (%s="%s")' % (
- kind, item_name, namespace, invalid_label, port_name))
-
-
-def print_validation_header():
- """
- Prints the error header. Should run on the first error to avoid
- overwhelming the user.
- """
- print """\
-At least one port name is invalid and must be corrected before upgrading.
-Please update or remove any resources with invalid port names.
-
- Valid port names must:
-
- * be less that 16 characters
- * have at least one letter
- * contain only a-z0-9-
- * not start or end with -
- * not contain dashes next to each other ('--')
-"""
-
-
-def main():
- """
- main is the main entry point to this script
- """
- try:
- # the comma at the end suppresses the newline
- print "Checking for oc ...",
- subprocess.check_output([OC_PATH, 'whoami'])
- print "found"
- except:
- print(
- 'Unable to run "%s whoami"\n'
- 'Please ensure OpenShift is running, and "oc" is on your system '
- 'path.\n'
- 'You can override the path with the OC_PATH environment variable.'
- % OC_PATH)
- raise SystemExit(1)
-
- # Where the magic happens
- first_error = True
- for kind, path in [
- ('deploymentconfigs', ("spec", "template", "spec", "containers")),
- ('replicationcontrollers', ("spec", "template", "spec", "containers")),
- ('pods', ("spec", "containers"))]:
- for item in list_items(kind):
- namespace = item["metadata"]["namespace"]
- item_name = item["metadata"]["name"]
- for container in get(item, *path):
- container_name = container["name"]
- for port in get(container, "ports"):
- port_name = port.get("name", None)
- if not port_name:
- # Unnamed ports are OK
- continue
- valid = validate(port_name)
- if not valid and first_error:
- first_error = False
- print_validation_header()
- pretty_print_errors(
- namespace, kind, item_name,
- container_name, "Port.name", port_name, valid)
-
- # Services follow a different flow
- for item in list_items('services'):
- namespace = item["metadata"]["namespace"]
- item_name = item["metadata"]["name"]
- for port in get(item, "spec", "ports"):
- port_name = port.get("targetPort", None)
- if isinstance(port_name, int) or port_name is None:
- # Integer only or unnamed ports are OK
- continue
- valid = validate(port_name)
- if not valid and first_error:
- first_error = False
- print_validation_header()
- pretty_print_errors(
- namespace, "services", item_name, "",
- "targetPort", port_name, valid)
-
- # If we had at least 1 error then exit with 1
- if not first_error:
- raise SystemExit(1)
-
-
-if __name__ == '__main__':
- main()
-
diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
deleted file mode 100644
index 7bf249742..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-if [ `which dnf 2> /dev/null` ]; then
- installed=$(dnf repoquery --installed --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
- available=$(dnf repoquery --available --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
-else
- installed=$(repoquery --plugins --pkgnarrow=installed --qf '%{version}-%{release}' "${@}" 2> /dev/null)
- available=$(repoquery --plugins --pkgnarrow=available --qf '%{version}-%{release}' "${@}" 2> /dev/null)
-fi
-
-echo "---"
-echo "curr_version: ${installed}"
-echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index fbdb7900a..8cac2fb3b 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,6 +1,4 @@
---
-- include: ../verify_ansible_version.yml
-
- hosts: localhost
connection: local
become: no
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index 9a065fd1c..673f11889 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -17,6 +17,7 @@ requirements: [ ]
EXAMPLES = '''
'''
+
def modify_api_levels(level_list, remove, ensure, msg_prepend='',
msg_append=''):
""" modify_api_levels """
@@ -62,7 +63,6 @@ def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
config = yaml.safe_load(master_cfg_file.read())
master_cfg_file.close()
-
# Remove unsupported api versions and ensure supported api versions from
# master config
unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
@@ -118,7 +118,7 @@ def main():
# redefined-outer-name
global module
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
config_base=dict(required=True),
from_version=dict(required=True, choices=['3.0']),
@@ -146,13 +146,14 @@ def main():
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
- except Exception, e:
+ except Exception as e:
return module.fail_json(msg=str(e))
+
# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
# import module snippets
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import * # noqa: E402,F403
if __name__ == '__main__':
main()
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index d7d1fe548..df2b664d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -6,7 +6,3 @@
- name: Ensure python-yaml present for config upgrade
package: name=PyYAML state=present
when: not openshift.common.is_atomic | bool
-
-- name: Restart node service
- service: name="{{ openshift.common.service_type }}-node" state=restarted
- when: component == "node"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 57c25aa41..6950b6166 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -27,14 +27,11 @@
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
-- name: Backup etcd
- include: ./etcd/backup.yml
+- name: Upgrade and backup etcd
+ include: ./etcd/main.yml
- name: Upgrade master packages
hosts: oo_masters_to_config
- handlers:
- - include: ../../../../roles/openshift_master/handlers/main.yml
- static: yes
roles:
- openshift_facts
tasks:
@@ -54,6 +51,14 @@
- include: create_service_signer_cert.yml
+# Set openshift_master_facts separately. In order to reconcile
+# admission_config's, we currently must run openshift_master_facts and
+# then run openshift_facts.
+- name: Set OpenShift master facts
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_master_facts
+
- name: Upgrade master config and systemd units
hosts: oo_masters_to_config
handlers:
@@ -61,7 +66,11 @@
static: yes
roles:
- openshift_facts
- tasks:
+ post_tasks:
+ - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
+
+ - include: upgrade_scheduler.yml
+
- include: "{{ master_config_hook }}"
when: master_config_hook is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 53d670196..86b344d7a 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,5 +1,5 @@
---
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
@@ -39,13 +39,18 @@
retries: 3
delay: 1
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
+
tasks:
+
- include: docker/upgrade.yml
+ vars:
+ # We will restart Docker ourselves after everything is ready:
+ skip_docker_restart: True
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- include: "{{ node_config_hook }}"
@@ -53,15 +58,35 @@
- include: rpm_upgrade.yml
vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
+ - name: Remove obsolete docker-sdn-ovs.conf
+ file: path=/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf state=absent
+ when: (deployment_type == 'openshift-enterprise' and openshift_release | version_compare('3.4', '>=')) or (deployment_type == 'origin' and openshift_release | version_compare('1.4', '>='))
+
- include: containerized_node_upgrade.yml
when: inventory_hostname in groups.oo_nodes_to_upgrade and openshift.common.is_containerized | bool
- - meta: flush_handlers
+ - name: Ensure containerized services stopped before Docker restart
+ service: name={{ item }} state=stopped
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+ # Mandatory Docker restart, ensure all containerized services are running:
+ - include: docker/restart.yml
+
+ - name: Restart rpm node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
- name: Set node schedulability
command: >
{{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
@@ -71,5 +96,3 @@
until: node_sched.rc == 0
retries: 3
delay: 1
-
-
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
new file mode 100644
index 000000000..88f2ddc78
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -0,0 +1,166 @@
+---
+# Upgrade predicates
+- vars:
+ prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
+ prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
+ default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
+ # older_predicates are the set of predicates that have previously been
+ # hard-coded into openshift_facts
+ older_predicates:
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - name: MaxEBSVolumeCount
+ - name: MaxGCEPDVolumeCount
+ - name: Region
+ argument:
+ serviceAffinity:
+ labels:
+ - region
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - name: Region
+ argument:
+ serviceAffinity:
+ labels:
+ - region
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: Region
+ argument:
+ serviceAffinity:
+ labels:
+ - region
+ # older_predicates_no_region are the set of predicates that have previously
+ # been hard-coded into openshift_facts, with the Region predicate removed
+ older_predicates_no_region:
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - name: MaxEBSVolumeCount
+ - name: MaxGCEPDVolumeCount
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ block:
+
+ # Handle case where openshift_master_predicates is defined
+ - block:
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}"
+
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}"
+
+ # Handle cases where openshift_master_predicates is not defined
+ - block:
+ - debug:
+ msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
+ openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
+ openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
+ when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
+ openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
+
+ when: "{{ openshift_master_scheduler_predicates | default(none) is none }}"
+
+
+# Upgrade priorities
+- vars:
+ prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
+ prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}"
+ default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}"
+ # older_priorities are the set of priorities that have previously been
+ # hard-coded into openshift_facts
+ older_priorities:
+ - - name: LeastRequestedPriority
+ weight: 1
+ - name: SelectorSpreadPriority
+ weight: 1
+ - name: Zone
+ weight: 2
+ argument:
+ serviceAntiAffinity:
+ label: zone
+ # older_priorities_no_region are the set of priorities that have previously
+ # been hard-coded into openshift_facts, with the Zone priority removed
+ older_priorities_no_zone:
+ - - name: LeastRequestedPriority
+ weight: 1
+ - name: SelectorSpreadPriority
+ weight: 1
+ block:
+
+ # Handle case where openshift_master_priorities is defined
+ - block:
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}"
+
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}"
+
+ # Handle cases where openshift_master_priorities is not defined
+ - block:
+ - debug:
+ msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
+ openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
+ openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
+ when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
+ openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
+
+ when: "{{ openshift_master_scheduler_priorities | default(none) is none }}"
+
+
+# Update scheduler
+- vars:
+ scheduler_config:
+ kind: Policy
+ apiVersion: v1
+ predicates: "{{ openshift_upgrade_scheduler_predicates
+ | default(openshift_master_scheduler_current_predicates) }}"
+ priorities: "{{ openshift_upgrade_scheduler_priorities
+ | default(openshift_master_scheduler_current_priorities) }}"
+ block:
+ - name: Update scheduler config
+ copy:
+ content: "{{ scheduler_config | to_nice_json }}"
+ dest: "{{ openshift_master_scheduler_conf }}"
+ backup: true
+ when: "{{ openshift_upgrade_scheduler_predicates is defined or
+ openshift_upgrade_scheduler_priorities is defined }}"
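When either openshift_upgrade_scheduler_* fact is set, the copy task above serializes scheduler_config with to_nice_json; with illustrative values drawn from the predicate and priority lists in this file, the rendered policy file would look roughly like:

    {
        "apiVersion": "v1",
        "kind": "Policy",
        "predicates": [
            {"name": "MatchNodeSelector"},
            {"name": "PodFitsResources"}
        ],
        "priorities": [
            {"name": "LeastRequestedPriority", "weight": 1}
        ]
    }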
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh
deleted file mode 120000
index 49a51bba9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh
+++ /dev/null
@@ -1 +0,0 @@
-../files/nuke_images.sh
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index 8c0bd272c..68c71a132 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -53,6 +53,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "{{ 'admission_plugin_config' in openshift.master }}"
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
index 8f64636ae..89b524f14 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
@@ -18,4 +18,3 @@
dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
yaml_key: 'masterClientConnectionOverrides.qps'
yaml_value: 20
-
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
index 32de9d94a..43c2ffcd4 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -3,6 +3,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "{{ 'admission_plugin_config' in openshift.master }}"
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index 50e25984f..48cc03b19 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -11,6 +11,6 @@
failed_when: false
- name: Warn user about bad openshift_hostname values
pause:
- prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
- seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
+ prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+ seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
when: lookupip.stdout not in ansible_all_ipv4_addresses
diff --git a/playbooks/common/openshift-cluster/verify_ansible_version.yml b/playbooks/common/openshift-cluster/verify_ansible_version.yml
deleted file mode 100644
index d75b23bf7..000000000
--- a/playbooks/common/openshift-cluster/verify_ansible_version.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Verify Ansible version is greater than or equal to 2.1.0.0
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Verify Ansible version is greater than or equal to 2.1.0.0
- fail:
- msg: "Unsupported ansible version: {{ ansible_version.full }} found"
- when: not ansible_version.full | version_compare('2.1.0.0', 'ge')
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
index f460612ba..a039d30b8 100644
--- a/playbooks/common/openshift-etcd/service.yml
+++ b/playbooks/common/openshift-etcd/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name=etcd state="{{ new_cluster_state }}"
+ - service: name=etcd state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
index efc80edf9..e413c2b3a 100644
--- a/playbooks/common/openshift-loadbalancer/service.yml
+++ b/playbooks/common/openshift-loadbalancer/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name=haproxy state="{{ new_cluster_state }}"
+ - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 5fcb850a2..39d64a126 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -74,11 +74,6 @@
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- - openshift_facts:
- role: hosted
- openshift_env:
- openshift_hosted_registry_storage_kind: 'nfs'
- when: openshift_hosted_registry_storage_kind is not defined and groups.oo_nfs_to_config is defined and groups.oo_nfs_to_config | length > 0
- name: Create temp directory for syncing certs
hosts: localhost
@@ -99,8 +94,8 @@
- openshift_facts:
role: master
local_facts:
- session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
- session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+ session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
+ session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
- name: Generate master session secrets
hosts: oo_first_master
@@ -133,9 +128,7 @@
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- - role: openshift_master_facts
- - role: openshift_hosted_facts
- - role: openshift_master_certificates
+ - role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
@@ -145,42 +138,12 @@
| oo_select_keys(groups['oo_masters_to_config'] | default([]))
| oo_collect('openshift.common.all_hostnames')
| oo_flatten | unique }}"
- - role: openshift_etcd_client_certificates
+ openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- - role: openshift_clock
- - role: openshift_cloud_provider
- - role: openshift_builddefaults
- - role: os_firewall
- os_firewall_allow:
- - service: etcd embedded
- port: 4001/tcp
- - service: api server https
- port: "{{ openshift.master.api_port }}/tcp"
- - service: api controllers https
- port: "{{ openshift.master.controllers_port }}/tcp"
- - service: skydns tcp
- port: "{{ openshift.master.dns_port }}/tcp"
- - service: skydns udp
- port: "{{ openshift.master.dns_port }}/udp"
- - service: Fluentd td-agent tcp
- port: 24224/tcp
- - service: Fluentd td-agent udp
- port: 24224/udp
- - service: pcsd
- port: 2224/tcp
- - service: Corosync UDP
- port: 5404/udp
- - service: Corosync UDP
- port: 5405/udp
- - role: openshift_master
- openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- - role: nickhammond.logrotate
- - role: nuage_master
- when: openshift.common.use_nuage | bool
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
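
The role list for master configuration collapses from roughly a dozen entries (facts, certificates, etcd client certs, clock, cloud provider, build defaults, firewall rules, logrotate, nuage) into a single openshift_master invocation, with the surviving variables (CA host, etcd cert paths, openshift_master_hosts) attached to that one entry. Presumably the dropped roles become dependencies of openshift_master itself; this diff does not show that file, so the following is a hypothetical reconstruction inferred from the removed entries:

# hypothetical roles/openshift_master/meta/main.yml (not shown in this diff)
dependencies:
- role: openshift_master_facts
- role: openshift_hosted_facts
- role: openshift_master_certificates
- role: openshift_etcd_client_certificates
  when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- role: openshift_clock
- role: openshift_cloud_provider
- role: openshift_builddefaults
- role: nickhammond.logrotate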
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 5769ef5cd..7b340887a 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -13,12 +13,12 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
- - role: master
- local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+ - role: common
+ local_facts:
+ rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+ - role: master
+ local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
# Creating a temp file on localhost, we then check each system that will
# be rebooted to see if that file exists, if so we know we're running
@@ -76,4 +76,3 @@
when: openshift.common.rolling_restart_mode == 'system'
- include: restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
-
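
The with_items reindent here is cosmetic. The comment preserved in the context lines describes the interesting safety mechanism: a marker file is created on localhost and then checked on each system scheduled for reboot, so the playbook can detect that it is about to reboot the machine it is running from. A sketch of that guard's shape, with invented task names and path:

# sketch of the localhost-marker guard the comment describes (names and path invented)
- name: Check for the marker dropped by the control host
  stat:
    path: /tmp/.ansible-master-restart-marker
  register: marker
- name: Refuse to reboot the Ansible control host
  fail:
    msg: "This master appears to be the Ansible control host; aborting reboot."
  when: marker.stat.exists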
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index b1c36718c..ffa23d26a 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -1,3 +1,4 @@
+---
- name: Restart master system
# https://github.com/ansible/ansible/issues/10616
shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
@@ -11,7 +12,7 @@
become: no
local_action:
module: wait_for
- host="{{ inventory_hostname }}"
+ host="{{ openshift.common.hostname }}"
state=started
delay=10
port="{{ openshift.master.api_port }}"
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 5e539cd65..b40c32669 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -1,3 +1,4 @@
+---
- name: Restart master
service:
name: "{{ openshift.common.service_type }}-master"
@@ -9,13 +10,11 @@
state: restarted
when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
- name: Restart master controllers
service:
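
Here the conversion goes the other way: local_action is dropped and plain wait_for runs on the master being restarted, so each master polls its own API port instead of depending on control-host connectivity. A bare TCP check only proves the port is listening; a stricter readiness probe could hit the health endpoint instead, as in this alternative sketch, which is not part of this commit:

# sketch: stricter readiness probe via /healthz (alternative, not in this commit)
- name: Wait for master API to answer HTTPS
  uri:
    url: "https://{{ openshift.common.hostname }}:{{ openshift.master.api_port }}/healthz"
    validate_certs: no
  register: api_health
  until: api_health.status == 200
  retries: 30
  delay: 5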
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
index 5e5198335..43ef8b6a1 100644
--- a/playbooks/common/openshift-master/service.yml
+++ b/playbooks/common/openshift-master/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
+ - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
index 8468014da..8c3f32403 100644
--- a/playbooks/common/openshift-nfs/service.yml
+++ b/playbooks/common/openshift-nfs/service.yml
@@ -15,4 +15,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name=nfs-server state="{{ new_cluster_state }}"
+ - service: name=nfs-server state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index e28da5713..b36c0eedf 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -60,30 +60,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - role: openshift_common
- - role: openshift_clock
- - role: openshift_docker
- - role: openshift_node_certificates
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- - role: openshift_cloud_provider
- - role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
- - role: os_firewall
- os_firewall_allow:
- - service: Kubernetes kubelet
- port: 10250/tcp
- - service: http
- port: 80/tcp
- - service: https
- port: 443/tcp
- - service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
- - service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- - service: OpenShift OVS sdn
- port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
- role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Configure nodes
hosts: oo_nodes_to_config:!oo_containerized_master_nodes
@@ -99,30 +77,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - role: openshift_common
- - role: openshift_clock
- - role: openshift_docker
- - role: openshift_node_certificates
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- - role: openshift_cloud_provider
- - role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
- - role: os_firewall
- os_firewall_allow:
- - service: Kubernetes kubelet
- port: 10250/tcp
- - service: http
- port: 80/tcp
- - service: https
- port: 443/tcp
- - service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
- - service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- - service: OpenShift OVS sdn
- port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
- role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Additional node config
hosts: oo_nodes_to_config
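
Both node plays get the same consolidation as the masters: the common/clock/docker/certificates/cloud-provider/dnsmasq/firewall stack disappears and a single openshift_node entry keeps openshift_ca_host. The removed os_firewall rules (kubelet 10250/tcp, read-only port 10255, http/https, SDN vxlan 4789/udp) presumably travel into the role the same way; a hypothetical reconstruction, with the same caveat as the master case:

# hypothetical roles/openshift_node/meta/main.yml (not shown in this diff)
dependencies:
- role: openshift_common
- role: openshift_clock
- role: openshift_docker
- role: openshift_node_certificates
- role: openshift_cloud_provider
- role: openshift_node_dnsmasq
  when: openshift.common.use_dnsmasq | bool
- role: os_firewall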
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
index 33095c9fb..2da68ceea 100644
--- a/playbooks/common/openshift-node/service.yml
+++ b/playbooks/common/openshift-node/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name={{ service_type }}-node state="{{ new_cluster_state }}"
+ - service: name={{ service_type }}-node state="{{ new_cluster_state }}"