path: root/playbooks/common
Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 5
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 25
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/masters.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/registry.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/router.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/restart.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 45
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 28
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 8
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 120
-rw-r--r--  playbooks/common/openshift-etcd/restart.yml | 2
-rw-r--r--  playbooks/common/openshift-glusterfs/config.yml | 23
-rw-r--r--  playbooks/common/openshift-master/config.yml | 39
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml | 1
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 1
-rw-r--r--  playbooks/common/openshift-node/network_manager.yml | 2
-rw-r--r--  playbooks/common/openshift-node/restart.yml | 5
25 files changed, 326 insertions(+), 61 deletions(-)
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 1bee460e8..c7766ff04 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,4 +1,9 @@
---
+# openshift_health_checker depends on openshift_version which now requires group eval.
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
- name: Run OpenShift health checks
hosts: OSEv3
roles:
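The tags: always on the include matters here: evaluate_groups.yml builds the in-memory oo_* groups (oo_all_hosts, oo_masters_to_config, and so on) via add_host, so it must run even when the playbook is invoked with --tags filtering. A minimal sketch of the kind of task it contains (illustrative; the real task list lives in evaluate_groups.yml):

    - name: Evaluate oo_all_hosts
      add_host:
        name: "{{ item }}"
        groups: oo_all_hosts
      with_items: "{{ g_all_hosts | default([]) }}"
      changed_when: no
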
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index e01c6f38d..7ca9f7e8b 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,4 +1,9 @@
---
+# openshift_health_checker depends on openshift_version which now requires group eval.
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
- hosts: OSEv3
name: run OpenShift pre-install checks
roles:
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 1482b3a3f..7224ae712 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,4 +1,23 @@
---
+# TODO: refactor this into its own include
+# and pass a variable for ctx
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: "install"
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
- include: initialize_oo_option_facts.yml
tags:
- always
@@ -45,6 +64,12 @@
tags:
- hosted
+- include: service_catalog.yml
+ when:
+ - openshift_enable_service_catalog | default(false) | bool
+ tags:
+ - servicecatalog
+
- name: Re-enable excluder if it was previously enabled
hosts: oo_masters_to_config:oo_nodes_to_config
tags:
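The new Verify Requirements play makes the install fail fast on hosts that cannot satisfy it. If a particular check is a known false positive in an environment, openshift_health_checker supports disabling checks by name; a hedged inventory sketch (the check names are taken from the list above, the variable is the one documented for this role):

    openshift_disable_check: disk_availability,docker_storage
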
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c28ce4c14..baca72c58 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -157,3 +157,12 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
changed_when: no
+
+ - name: Evaluate oo_etcd_to_migrate
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_migrate
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}"
+ changed_when: no
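The with_items expression encodes a fallback: when dedicated etcd hosts are defined, oo_etcd_to_migrate is populated from oo_etcd_to_config; otherwise the cluster runs embedded etcd, and the first master, which hosts the embedded store, is targeted instead. The two outcomes under illustrative inventories:

    # inventory with [etcd] hosts etcd1, etcd2  ->  oo_etcd_to_migrate = [etcd1, etcd2]
    # inventory with no [etcd] group            ->  oo_etcd_to_migrate = [master1]
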
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 8d94b6509..ce7f981ab 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -26,6 +26,8 @@
logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
+ - role: openshift_default_storage_class
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- role: openshift_hosted
- role: openshift_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
@@ -45,8 +47,6 @@
- role: cockpit-ui
when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
- - role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- name: Update master-config for publicLoggingURL
hosts: oo_masters_to_config:!oo_first_master
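Moving openshift_default_storage_class ahead of openshift_hosted ensures the default StorageClass exists before hosted components request persistent volumes. A hedged sketch of the kind of object such a role creates on AWS; the name, parameters, and annotation are illustrative for this release line, not taken from the role:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: gp2
      annotations:
        storageclass.beta.kubernetes.io/is-default-class: "true"
    provisioner: kubernetes.io/aws-ebs
    parameters:
      type: gp2
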
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index bcff4a1a1..2c8ad5b75 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -5,3 +5,12 @@
hosts: oo_first_master
roles:
- openshift_metrics
+
+- name: OpenShift Metrics
+ hosts: oo_masters:!oo_first_master
+ serial: 1
+ tasks:
+ - name: Set up configs on the non-first masters

+ include_role:
+ name: openshift_metrics
+ tasks_from: update_master_config.yaml
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
index c30889d64..51b196299 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
@@ -51,3 +51,13 @@
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_certificates_redeploy: true
+ - role: lib_utils
+ post_tasks:
+ - yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: servingInfo.namedCertificates
+ value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}"
+ when:
+ - ('named_certificates' in openshift.master)
+ - openshift.master.named_certificates | default([]) | length > 0
+ - openshift_master_overwrite_named_certificates | default(false) | bool
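The yedit post_task keeps servingInfo.namedCertificates in master-config.yaml in sync with the redeployed certificates. A hedged sketch of the resulting config section (file paths and hostnames are illustrative):

    servingInfo:
      namedCertificates:
      - certFile: named_certificates/custom.crt
        keyFile: named_certificates/custom.key
        names:
        - api.example.com
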
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index 8c8062585..afd5463b2 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -66,6 +66,7 @@
--signer-cert={{ openshift.common.config_base }}/master/ca.crt
--signer-key={{ openshift.common.config_base }}/master/ca.key
--signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
+ --config={{ mktemp.stdout }}/admin.kubeconfig
--hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift.common.config_base }}/master/registry.crt
--key={{ openshift.common.config_base }}/master/registry.key
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index 9f14f2d69..748bbbf91 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -116,8 +116,9 @@
tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
--type=kubernetes.io/tls
+ --config={{ mktemp.stdout }}/admin.kubeconfig
--confirm
- -o json | {{ openshift.common.client_binary }} replace -f -
+ -o json | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig replace -f -
- name: Remove temporary router certificate and key files
file:
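Both sides of the pipe are separate client processes, so each needs its own --config; relying on default kubeconfig loading rules on the far side of the pipe is exactly what this fix removes. The general shape of the pattern, as a hedged sketch (the secret name and operation are illustrative, not the exact command above):

    - shell: >
        {{ openshift.common.client_binary }} get secret router-certs -o json
        --config={{ mktemp.stdout }}/admin.kubeconfig
        | {{ openshift.common.client_binary }} replace -f -
        --config={{ mktemp.stdout }}/admin.kubeconfig
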
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
new file mode 100644
index 000000000..6c12875fe
--- /dev/null
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -0,0 +1,21 @@
+---
+- include: evaluate_groups.yml
+
+- name: Update Master configs
+ hosts: oo_masters
+ serial: 1
+ tasks:
+ - block:
+ - include_role:
+ name: openshift_service_catalog
+ tasks_from: wire_aggregator
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Service Catalog
+ hosts: oo_first_master
+ roles:
+ - openshift_service_catalog
+ - ansible_service_broker
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
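The wire_aggregator play runs with serial: 1 so master configuration is rewired one host at a time, keeping the API available during the rollout. The playbook is gated in config.yml by openshift_enable_service_catalog (see the hunk above), so enabling it from inventory is a one-liner:

    openshift_enable_service_catalog: true
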
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 07db071ce..02b8a9d3c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -52,9 +52,13 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+ register: l_docker_upgrade_drain_result
+ until: not l_docker_upgrade_drain_result | failed
+ retries: 60
+ delay: 60
- include: upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
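Draining can legitimately block on pod eviction, so the new until/retries guard retries for up to an hour (60 attempts, 60 seconds apart) instead of failing on the first timeout. The "| failed" filter-style test matches the Ansible versions this branch supported; on Ansible 2.5+ the same guard would use the test syntax, a sketch assuming the newer Ansible:

    register: l_docker_upgrade_drain_result
    until: l_docker_upgrade_drain_result is not failed
    retries: 60
    delay: 60
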
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
index 1b418920f..13313377e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
@@ -1,6 +1,10 @@
---
- name: Restart docker
service: name=docker state=restarted
+ register: l_docker_restart_docker_in_upgrade_result
+ until: not l_docker_restart_docker_in_upgrade_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
@@ -24,4 +28,5 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
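wait_for defaults to a 300-second timeout; after a Docker restart a master API can take longer than that to come back, so these hunks double it. A hedged sketch of the complete task after the patch (the host argument is assumed from similar plays, not shown in this hunk):

    - name: Wait for master API to come back online
      wait_for:
        host: "{{ openshift.common.hostname }}"
        state: started
        delay: 10
        port: "{{ openshift.master.api_port }}"
        timeout: 600
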
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 17f8fc6e9..35d000e49 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -32,7 +32,13 @@
- debug: var=docker_image_count.stdout
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-- service: name=docker state=stopped
+- service:
+ name: docker
+ state: stopped
+ register: l_pb_docker_upgrade_stop_result
+ until: not l_pb_docker_upgrade_stop_result | failed
+ retries: 3
+ delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 0431c1ce0..39e82498d 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -36,7 +36,7 @@
- not openshift.common.is_etcd_system_container | bool
- name: Record containerized etcd version (runc)
- command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
register: etcd_container_version_runc
failed_when: false
# AUDIT:changed_when: `false` because we are only inspecting
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 046535680..72de63070 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -6,27 +6,32 @@
- lib_openshift
tasks:
- - name: Retrieve list of openshift nodes matching upgrade label
- oc_obj:
- state: list
- kind: node
- selector: "{{ openshift_upgrade_nodes_label }}"
- register: nodes_to_upgrade
- when: openshift_upgrade_nodes_label is defined
+ - when: openshift_upgrade_nodes_label is defined
+ block:
+ - name: Retrieve list of openshift nodes matching upgrade label
+ oc_obj:
+ state: list
+ kind: node
+ selector: "{{ openshift_upgrade_nodes_label }}"
+ register: nodes_to_upgrade
- # We got a list of nodes with the label, now we need to match these with inventory hosts
- # using their openshift.common.hostname fact.
- - name: Map labelled nodes to inventory hosts
- add_host:
- name: "{{ item }}"
- groups: temp_nodes_to_upgrade
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: " {{ groups['oo_nodes_to_config'] }}"
- when:
- - openshift_upgrade_nodes_label is defined
- - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
- changed_when: false
+ - name: Fail if no nodes match openshift_upgrade_nodes_label
+ fail:
+ msg: "openshift_upgrade_nodes_label was specified but no nodes matched"
+ when: nodes_to_upgrade.results.results[0]['items'] | length == 0
+
+ # We got a list of nodes with the label, now we need to match these with inventory hosts
+ # using their openshift.common.hostname fact.
+ - name: Map labelled nodes to inventory hosts
+ add_host:
+ name: "{{ item }}"
+ groups: temp_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: " {{ groups['oo_nodes_to_config'] }}"
+ when:
+ - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+ changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
# present, otherwise hit all nodes:
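The refactor folds the shared openshift_upgrade_nodes_label condition into a block and, more importantly, fails fast when the label matches nothing; previously an empty match could fall through to the "hit all nodes" path described in the comment below. Hedged usage sketch (the label value is illustrative):

    openshift_upgrade_nodes_label: "region=infra"
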
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 6738ce11f..2b2f10aee 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -5,13 +5,13 @@
# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
-- name: Pre master upgrade - Upgrade job storage
+- name: Pre master upgrade - Upgrade all storage
hosts: oo_first_master
tasks:
- - name: Upgrade job storage
+ - name: Upgrade all storage
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
+ migrate storage --include=* --confirm
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
@@ -143,13 +143,13 @@
- set_fact:
master_update_complete: True
-- name: Post master upgrade - Upgrade job storage
+- name: Post master upgrade - Upgrade clusterpolicies storage
hosts: oo_first_master
tasks:
- - name: Upgrade job storage
+ - name: Upgrade clusterpolicies storage
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
+ migrate storage --include=clusterpolicies --confirm
##############################################################################
# Gate on master update complete
@@ -223,13 +223,19 @@
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true -o name
+ {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
register: reconcile_scc_result
changed_when:
- reconcile_scc_result.stdout != ''
- reconcile_scc_result.rc == 0
run_once: true
+ - name: Upgrade job storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ run_once: true
+
- set_fact:
reconcile_complete: True
@@ -288,15 +294,19 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_control_plane_drain_result
+ until: not l_upgrade_control_plane_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
- openshift_facts
- docker
- - openshift_node_upgrade
- openshift_node_dnsmasq
+ - openshift_node_upgrade
post_tasks:
- name: Set node schedulability
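Because these storage migrations run through the command module rather than shell, the literal * in --include=* reaches the client unglobbed and needs no quoting; under the shell module the same invocation would, a sketch for contrast:

    - shell: >
        {{ openshift.common.client_binary }} adm
        --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        migrate storage --include='*' --confirm
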
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 35a50cf4e..c93a5d89c 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,15 +26,19 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not l_upgrade_nodes_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
- openshift_facts
- docker
- - openshift_node_upgrade
- openshift_node_dnsmasq
+ - openshift_node_upgrade
- role: openshift_excluder
r_openshift_excluder_action: enable
r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
new file mode 100644
index 000000000..c655449fa
--- /dev/null
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -0,0 +1,120 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
+- name: Run pre-checks
+ hosts: oo_etcd_to_migrate
+ tags:
+ - always
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: check
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+- include: ../openshift-cluster/initialize_facts.yml
+ tags:
+ - always
+
+- name: Backup v2 data
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ tags:
+ - always
+ roles:
+ - role: openshift_facts
+ - role: etcd_common
+ r_etcd_common_action: backup
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migration
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when:
+ - etcd_backup_failed | length > 0
+
+- name: Prepare masters for etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master' }}"
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master-controllers' }}"
+ - "{{ openshift.common.service_type + '-master-api' }}"
+ when:
+ - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
+ - debug:
+ msg: "master service name: {{ master_services }}"
+ - name: Stop masters
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items: "{{ master_services }}"
+
+- name: Migrate etcd data from v2 to v3
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ tags:
+ - always
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: migrate
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+- name: Gate on etcd migration
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ etcd_migration_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
+ - set_fact:
+ etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}"
+
+- name: Configure masters if etcd data migration is successful
+ hosts: oo_masters_to_config
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: configure
+ when: etcd_migration_failed | length == 0
+ tasks:
+ - debug:
+ msg: "Skipping master re-configuration since migration failed."
+ when:
+ - etcd_migration_failed | length > 0
+
+- name: Start masters after etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - name: Start master services
+ service:
+ name: "{{ item }}"
+ state: started
+ register: service_status
+ # Sometimes the master-api or master-controllers service fails to start on the first attempt
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
+ with_items: "{{ master_services[::-1] }}"
+ - fail:
+ msg: "Migration failed. The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}"
+ when:
+ - etcd_migration_failed | length > 0
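For reference, the migrate action in the etcd_migrate role ultimately drives etcd's own v2-to-v3 key migration; a heavily hedged sketch of the underlying command (the data dir is illustrative, and the role may wrap it with additional backup and TTL handling):

    - name: Migrate etcd v2 keys into the v3 store
      command: etcdctl migrate --data-dir=/var/lib/etcd
      environment:
        ETCDCTL_API: "3"
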
diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml
index 196c86f28..af1ef245a 100644
--- a/playbooks/common/openshift-etcd/restart.yml
+++ b/playbooks/common/openshift-etcd/restart.yml
@@ -5,5 +5,5 @@
tasks:
- name: restart etcd
service:
- name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}"
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
state: restarted
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 1efdfb336..edc15a3f2 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -1,6 +1,6 @@
---
-- name: Open firewall ports for GlusterFS
- hosts: oo_glusterfs_to_config
+- name: Open firewall ports for GlusterFS nodes
+ hosts: glusterfs
vars:
os_firewall_allow:
- service: glusterfs_sshd
@@ -14,7 +14,24 @@
roles:
- role: os_firewall
when:
- - openshift_storage_glusterfs_is_native | default(True)
+ - openshift_storage_glusterfs_is_native | default(True) | bool
+
+- name: Open firewall ports for GlusterFS registry nodes
+ hosts: glusterfs_registry
+ vars:
+ os_firewall_allow:
+ - service: glusterfs_sshd
+ port: "2222/tcp"
+ - service: glusterfs_daemon
+ port: "24007/tcp"
+ - service: glusterfs_management
+ port: "24008/tcp"
+ - service: glusterfs_bricks
+ port: "49152-49251/tcp"
+ roles:
+ - role: os_firewall
+ when:
+ - openshift_storage_glusterfs_registry_is_native | default(True) | bool
- name: Configure GlusterFS
hosts: oo_first_master
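The added | bool casts matter because inventory values arrive as strings: in a bare when:, the string "false" is truthy (it is a non-empty string), while "false" | bool evaluates to False. A minimal demonstration:

    - debug:
        msg: "{{ 'false' | bool }}"   # -> False; the bare string 'false' would be truthy in when:
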
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 70108fb7a..7d3a371e3 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -20,25 +20,6 @@
- node
- .config_managed
- - name: Check for existing configuration
- stat:
- path: /etc/origin/master/master-config.yaml
- register: master_config_stat
-
- - name: Set clean install fact
- set_fact:
- l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
-
- - name: Determine if etcd3 storage is in use
- command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
- register: etcd3_grep
- failed_when: false
- changed_when: false
-
- - name: Set etcd3 fact
- set_fact:
- l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
-
- set_fact:
openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}"
when: openshift_master_pod_eviction_timeout is not defined
@@ -88,7 +69,7 @@
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-- name: Determine if session secrets must be generated
+- name: Inspect state of first master session secrets and config
hosts: oo_first_master
roles:
- role: openshift_facts
@@ -98,6 +79,24 @@
local_facts:
session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+ - name: Check for existing configuration
+ stat:
+ path: /etc/origin/master/master-config.yaml
+ register: master_config_stat
+
+ - name: Set clean install fact
+ set_fact:
+ l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
+
+ - name: Determine if etcd3 storage is in use
+ command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
+ register: etcd3_grep
+ failed_when: false
+ changed_when: false
+
+ - name: Set etcd3 fact
+ set_fact:
+ l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
- name: Generate master session secrets
hosts: oo_first_master
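Relocating the detection tasks to oo_first_master avoids grepping every master for facts that are only consumed there. The grep itself is worth unpacking, since the flags are unusual; an annotated restatement, not a change:

    - name: Detect etcd3 storage backend
      # -P: Perl regex, so "\n" in the pattern works
      # -z: NUL-terminated records, letting the match span lines
      # -o: print only the match (moot under -q)
      # -q (trailing): suppress output; rc == 0 alone signals a match
      command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
      register: etcd3_grep
      failed_when: false
      changed_when: false
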
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index 67ba0aa2e..a5dbe0590 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -37,3 +37,4 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 508b5a3ac..a844fb369 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -15,6 +15,7 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: openshift_master_ha | bool
- name: Restart master controllers
service:
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml
index 0014a5dbd..b3a7399dc 100644
--- a/playbooks/common/openshift-node/network_manager.yml
+++ b/playbooks/common/openshift-node/network_manager.yml
@@ -1,4 +1,6 @@
---
+- include: ../openshift-cluster/evaluate_groups.yml
+
- name: Install and configure NetworkManager
hosts: oo_all_hosts
become: yes
diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml
index 01cf948e0..ed2473a43 100644
--- a/playbooks/common/openshift-node/restart.yml
+++ b/playbooks/common/openshift-node/restart.yml
@@ -11,6 +11,10 @@
service:
name: docker
state: restarted
+ register: l_docker_restart_docker_in_node_result
+ until: not l_docker_restart_docker_in_node_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
@@ -36,6 +40,7 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
- name: restart node