Diffstat (limited to 'playbooks/common/openshift-cluster')
-rw-r--r--  playbooks/common/openshift-cluster/config.yml                                | 25
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml                       |  9
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/masters.yml         | 10
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml                       | 21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml                 |  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml  | 45
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml        | 20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml                |  2
8 files changed, 105 insertions, 29 deletions
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 1482b3a3f..7224ae712 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,4 +1,23 @@
---
+# TODO: refactor this into its own include
+# and pass a variable for ctx
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: "install"
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
- include: initialize_oo_option_facts.yml
tags:
- always
@@ -45,6 +64,12 @@
tags:
- hosted
+- include: service_catalog.yml
+ when:
+ - openshift_enable_service_catalog | default(false) | bool
+ tags:
+ - servicecatalog
+
- name: Re-enable excluder if it was previously enabled
hosts: oo_masters_to_config:oo_nodes_to_config
tags:
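
The new Verify Requirements play runs the openshift_health_checker role against every host before the install proceeds. As a sketch only, the same checks could be exercised on their own with a play that mirrors the hunk above; the standalone layout is an assumption, while the role, the action plugin and the check names are taken from the diff:

    # Hypothetical standalone playbook reusing the health-check play added to config.yml.
    - name: Verify requirements without running the full install
      hosts: oo_all_hosts
      roles:
        - openshift_health_checker
      vars:
        - r_openshift_health_checker_playbook_context: "install"
      post_tasks:
        - action: openshift_health_check
          args:
            checks:
              - disk_availability
              - memory_availability
              - package_availability
              - package_version
              - docker_image_availability
              - docker_storage

The service_catalog.yml include added further down only fires when the inventory sets openshift_enable_service_catalog to true; it defaults to false.
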
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c28ce4c14..baca72c58 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -157,3 +157,12 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
changed_when: no
+
+ - name: Evaluate oo_etcd_to_migrate
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_migrate
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}"
+ changed_when: no
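
The inline conditional above seeds oo_etcd_to_migrate from the dedicated etcd hosts when any are defined and falls back to the first master (embedded etcd) otherwise. A minimal sketch of the same selection as a standalone debug task, assuming an inventory where oo_etcd_to_config and oo_first_master already exist (the play itself is hypothetical; the expression is copied from the add_host task):

    - hosts: localhost
      gather_facts: false
      tasks:
        - name: Show which hosts would join oo_etcd_to_migrate
          debug:
            msg: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}"
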
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
index c30889d64..51b196299 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
@@ -51,3 +51,13 @@
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_certificates_redeploy: true
+ - role: lib_utils
+ post_tasks:
+ - yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: servingInfo.namedCertificates
+ value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}"
+ when:
+ - ('named_certificates' in openshift.master)
+ - openshift.master.named_certificates | default([]) | length > 0
+ - openshift_master_overwrite_named_certificates | default(false) | bool
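
The added yedit task rewrites servingInfo.namedCertificates in master-config.yaml so that the redeployed certificate list overwrites the existing entries when openshift_master_overwrite_named_certificates is set. For orientation, a named-certificates entry in that file typically looks like the fragment below; the paths and host name are placeholders, and the real values come from openshift.master.named_certificates as used above:

    servingInfo:
      namedCertificates:
      - certFile: named_certificates/custom.crt
        keyFile: named_certificates/custom.key
        names:
        - "api.example.com"
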
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
new file mode 100644
index 000000000..6c12875fe
--- /dev/null
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -0,0 +1,21 @@
+---
+- include: evaluate_groups.yml
+
+- name: Update Master configs
+ hosts: oo_masters
+ serial: 1
+ tasks:
+ - block:
+ - include_role:
+ name: openshift_service_catalog
+ tasks_from: wire_aggregator
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Service Catalog
+ hosts: oo_first_master
+ roles:
+ - openshift_service_catalog
+ - ansible_service_broker
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
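
If only the service catalog piece needs to be applied or re-applied, the gated include used in config.yml can be lifted into a small wrapper. Purely illustrative; the include, the variable and its default mirror the config.yml hunk earlier in this diff, and service_catalog.yml already pulls in evaluate_groups.yml itself:

    - include: service_catalog.yml
      when:
        - openshift_enable_service_catalog | default(false) | bool
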
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 0431c1ce0..39e82498d 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -36,7 +36,7 @@
- not openshift.common.is_etcd_system_container | bool
- name: Record containerized etcd version (runc)
- command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
register: etcd_container_version_runc
failed_when: false
# AUDIT:changed_when: `false` because we are only inspecting
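
The fix above only changes the container name handed to runc exec from etcd_container to etcd, presumably the name the etcd system container actually runs under. The corrected check can be reproduced as a standalone task; the host pattern and play are illustrative, the command is the one from the diff:

    - hosts: oo_etcd_to_config[0]
      tasks:
        - name: Record containerized etcd version (runc)
          command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
          register: etcd_container_version_runc
          failed_when: false
          changed_when: false
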
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 046535680..72de63070 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -6,27 +6,32 @@
- lib_openshift
tasks:
- - name: Retrieve list of openshift nodes matching upgrade label
- oc_obj:
- state: list
- kind: node
- selector: "{{ openshift_upgrade_nodes_label }}"
- register: nodes_to_upgrade
- when: openshift_upgrade_nodes_label is defined
+ - when: openshift_upgrade_nodes_label is defined
+ block:
+ - name: Retrieve list of openshift nodes matching upgrade label
+ oc_obj:
+ state: list
+ kind: node
+ selector: "{{ openshift_upgrade_nodes_label }}"
+ register: nodes_to_upgrade
- # We got a list of nodes with the label, now we need to match these with inventory hosts
- # using their openshift.common.hostname fact.
- - name: Map labelled nodes to inventory hosts
- add_host:
- name: "{{ item }}"
- groups: temp_nodes_to_upgrade
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: " {{ groups['oo_nodes_to_config'] }}"
- when:
- - openshift_upgrade_nodes_label is defined
- - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
- changed_when: false
+ - name: Fail if no nodes match openshift_upgrade_nodes_label
+ fail:
+ msg: "openshift_upgrade_nodes_label was specified but no nodes matched"
+ when: nodes_to_upgrade.results.results[0]['items'] | length == 0
+
+ # We got a list of nodes with the label, now we need to match these with inventory hosts
+ # using their openshift.common.hostname fact.
+ - name: Map labelled nodes to inventory hosts
+ add_host:
+ name: "{{ item }}"
+ groups: temp_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: " {{ groups['oo_nodes_to_config'] }}"
+ when:
+ - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+ changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
# present, otherwise hit all nodes:
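
With the refactor above, the label-driven path now fails fast when openshift_upgrade_nodes_label is set but matches no nodes, rather than proceeding with an empty or unintended node set. An illustrative extra-vars snippet; the label key and value are placeholders:

    # e.g. passed via -e @upgrade_vars.yml (file name is illustrative)
    openshift_upgrade_nodes_label: "region=infra"
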
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 6738ce11f..227fbf60a 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -5,13 +5,13 @@
# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
-- name: Pre master upgrade - Upgrade job storage
+- name: Pre master upgrade - Upgrade all storage
hosts: oo_first_master
tasks:
- - name: Upgrade job storage
+ - name: Upgrade all storage
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
+ migrate storage --include=* --confirm
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
@@ -143,13 +143,13 @@
- set_fact:
master_update_complete: True
-- name: Post master upgrade - Upgrade job storage
+- name: Post master upgrade - Upgrade clusterpolicies storage
hosts: oo_first_master
tasks:
- - name: Upgrade job storage
+ - name: Upgrade clusterpolicies storage
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
+ migrate storage --include=clusterpolicies --confirm
##############################################################################
# Gate on master update complete
@@ -230,6 +230,12 @@
- reconcile_scc_result.rc == 0
run_once: true
+ - name: Upgrade job storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ run_once: true
+
- set_fact:
reconcile_complete: True
@@ -295,8 +301,8 @@
- lib_openshift
- openshift_facts
- docker
- - openshift_node_upgrade
- openshift_node_dnsmasq
+ - openshift_node_upgrade
post_tasks:
- name: Set node schedulability
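
Taken together, the storage changes above replace the jobs-only migration with a full migration before the master upgrade, run a clusterpolicies-only pass right after it, and add another full pass during the reconciliation play. For reference, the full migration amounts to the task below, shown standalone; the binary and path resolve through openshift facts, with /etc/origin being the usual config_base:

    - hosts: oo_first_master
      tasks:
        - name: Upgrade all storage
          command: >
            {{ openshift.common.client_binary }} adm
            --config={{ openshift.common.config_base }}/master/admin.kubeconfig
            migrate storage --include=* --confirm
          run_once: true
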
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 35a50cf4e..1d1e440d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -33,8 +33,8 @@
- lib_openshift
- openshift_facts
- docker
- - openshift_node_upgrade
- openshift_node_dnsmasq
+ - openshift_node_upgrade
- role: openshift_excluder
r_openshift_excluder_action: enable
r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
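
As in upgrade_control_plane.yml, the only change here is ordering: openshift_node_dnsmasq now runs before openshift_node_upgrade. The resulting role list for this play, reconstructed from the hunk above with the surrounding play content abridged:

    roles:
      - lib_openshift
      - openshift_facts
      - docker
      - openshift_node_dnsmasq
      - openshift_node_upgrade
      - role: openshift_excluder
        r_openshift_excluder_action: enable
        r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"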