Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/README.md | 7
-rw-r--r--  playbooks/common/openshift-cfme/config.yml | 44
-rw-r--r--  playbooks/common/openshift-checks/adhoc.yml | 13
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 10
-rw-r--r--  playbooks/common/openshift-checks/install.yml | 47
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 23
-rw-r--r--  playbooks/common/openshift-cluster/cockpit-ui.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 61
-rw-r--r--  playbooks/common/openshift-cluster/create_persistent_volumes.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 45
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 162
-rw-r--r--  playbooks/common/openshift-cluster/initialize_oo_option_facts.yml | 27
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_repos.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 28
-rw-r--r--  playbooks/common/openshift-cluster/openshift_default_storage_class.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 95
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_registry.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_router.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml | 31
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml | 25
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml | 127
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml | 70
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/masters.yml) | 23
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/nodes.yml) | 5
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml | 87
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/registry.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/router.yml | 137
-rw-r--r--  playbooks/common/openshift-cluster/sanity_checks.yml | 51
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml | 29
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml | 30
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/restart.yml) | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml) | 9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 1
l---------  playbooks/common/openshift-cluster/upgrades/master_docker | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 29
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml (renamed from playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml) | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 42
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 120
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml | 35
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 11
l---------  playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml | 20
l---------  playbooks/common/openshift-cluster/upgrades/v3_7/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 142
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 144
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 115
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 23
-rw-r--r--  playbooks/common/openshift-cluster/validate_hostnames.yml | 15
-rw-r--r--  playbooks/common/openshift-etcd/ca.yml | 15
-rw-r--r--  playbooks/common/openshift-etcd/certificates.yml | 4
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 25
-rw-r--r--  playbooks/common/openshift-etcd/embedded2external.yml | 172
-rw-r--r--  playbooks/common/openshift-etcd/master_etcd_certificates.yml | 14
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 169
-rw-r--r--  playbooks/common/openshift-etcd/restart.yml | 18
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 83
-rw-r--r--  playbooks/common/openshift-etcd/server_certificates.yml | 15
-rw-r--r--  playbooks/common/openshift-etcd/service.yml | 23
-rw-r--r--  playbooks/common/openshift-glusterfs/config.yml | 67
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 31
-rw-r--r--  playbooks/common/openshift-loadbalancer/service.yml | 23
-rw-r--r--  playbooks/common/openshift-management/add_container_provider.yml | 8
-rw-r--r--  playbooks/common/openshift-management/config.yml | 35
l---------  playbooks/common/openshift-management/filter_plugins (renamed from playbooks/common/openshift-cfme/filter_plugins) | 0
l---------  playbooks/common/openshift-management/library (renamed from playbooks/common/openshift-cfme/library) | 0
l---------  playbooks/common/openshift-management/roles (renamed from playbooks/common/openshift-cfme/roles) | 0
-rw-r--r--  playbooks/common/openshift-management/uninstall.yml (renamed from playbooks/common/openshift-cfme/uninstall.yml) | 4
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml | 46
-rw-r--r--  playbooks/common/openshift-master/certificates.yml | 14
-rw-r--r--  playbooks/common/openshift-master/config.yml | 206
-rw-r--r--  playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js | 2
-rw-r--r--  playbooks/common/openshift-master/restart.yml | 2
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml | 1
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 6
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 39
-rw-r--r--  playbooks/common/openshift-master/service.yml | 23
-rw-r--r--  playbooks/common/openshift-master/set_network_facts.yml | 34
-rw-r--r--  playbooks/common/openshift-master/tasks/wire_aggregator.yml | 215
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 22
-rw-r--r--  playbooks/common/openshift-nfs/service.yml | 21
-rw-r--r--  playbooks/common/openshift-node/additional_config.yml | 64
-rw-r--r--  playbooks/common/openshift-node/certificates.yml | 8
-rw-r--r--  playbooks/common/openshift-node/clean_image.yml | 10
-rw-r--r--  playbooks/common/openshift-node/config.yml | 118
-rw-r--r--  playbooks/common/openshift-node/configure_nodes.yml | 17
-rw-r--r--  playbooks/common/openshift-node/containerized_nodes.yml | 19
-rw-r--r--  playbooks/common/openshift-node/enable_excluders.yml | 8
-rw-r--r--  playbooks/common/openshift-node/etcd_client_config.yml | 11
-rw-r--r--  playbooks/common/openshift-node/image_prep.yml | 24
-rw-r--r--  playbooks/common/openshift-node/manage_node.yml | 12
-rw-r--r--  playbooks/common/openshift-node/network_manager.yml | 2
-rw-r--r--  playbooks/common/openshift-node/restart.yml | 6
-rw-r--r--  playbooks/common/openshift-node/scaleup.yml | 50
-rw-r--r--  playbooks/common/openshift-node/service.yml | 26
-rw-r--r--  playbooks/common/openshift-node/setup.yml | 27
137 files changed, 3019 insertions(+), 1173 deletions(-)
diff --git a/playbooks/common/README.md b/playbooks/common/README.md
index 0b5e26989..968bd99cb 100644
--- a/playbooks/common/README.md
+++ b/playbooks/common/README.md
@@ -1,9 +1,8 @@
# Common playbooks
This directory has a generic set of playbooks that are included by playbooks in
-[`byo`](../byo), as well as other playbooks related to the
-[`bin/cluster`](../../bin) tool.
+[`byo`](../byo).
Note: playbooks in this directory use generic group names that do not line up
-with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks,
-requiring an explicit remapping of groups.
+with the groups used by the `byo` playbooks, requiring an explicit remapping of
+groups.
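For reference, the explicit remapping the note above describes is the add_host pattern used by evaluate_groups.yml later in this diff. A minimal sketch, assuming a byo-style inventory group named 'masters':

---
# Sketch: map an inventory-specific group onto the generic group the common
# playbooks operate on; mirrors the add_host tasks in evaluate_groups.yml.
- name: Remap inventory groups to generic groups (sketch)
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Evaluate oo_masters_to_config
      add_host:
        name: "{{ item }}"
        groups: oo_masters_to_config
      with_items: "{{ groups['masters'] | default([]) }}"
      changed_when: no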
diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml
deleted file mode 100644
index 533a35d9e..000000000
--- a/playbooks/common/openshift-cfme/config.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-# TODO: Make this work. The 'name' variable below is undefined
-# presently because it's part of the cfme role. This play can't run
-# until that's re-worked.
-#
-# - name: Pre-Pull manageiq-pods docker images
-# hosts: nodes
-# tasks:
-# - name: Ensure the latest manageiq-pods docker image is pulling
-# docker_image:
-# name: "{{ openshift_cfme_container_image }}"
-# # Fire-and-forget method, never timeout
-# async: 99999999999
-# # F-a-f, never check on this. True 'background' task.
-# poll: 0
-
-- name: Configure Masters for CFME Bulk Image Imports
- hosts: oo_masters_to_config
- serial: 1
- tasks:
- - name: Run master cfme tuning playbook
- include_role:
- name: openshift_cfme
- tasks_from: tune_masters
-
-- name: Setup CFME
- hosts: oo_first_master
- vars:
- r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}"
- pre_tasks:
- - name: Create a temporary place to evaluate the PV templates
- command: mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: r_openshift_cfme_mktemp
- changed_when: false
- - name: Ensure the server template was read from disk
- debug:
- msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}"
-
- tasks:
- - name: Run the CFME Setup Role
- include_role:
- name: openshift_cfme
- vars:
- template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}"
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml
new file mode 100644
index 000000000..d0deaeb65
--- /dev/null
+++ b/playbooks/common/openshift-checks/adhoc.yml
@@ -0,0 +1,13 @@
+---
+- name: OpenShift Health Checks
+ hosts: oo_all_hosts
+
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: adhoc
+ post_tasks:
+ - name: Run health checks (adhoc)
+ action: openshift_health_check
+ args:
+ checks: '{{ openshift_checks | default([]) }}'
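The new adhoc entry point reads its check list from the openshift_checks variable shown above. A hedged example of selecting individual checks, using names that appear in install.yml below:

---
# Hypothetical vars file (or -e payload) selecting specific ad-hoc checks;
# the names are the ones listed in openshift-checks/install.yml below.
openshift_checks:
  - disk_availability
  - memory_availability
  - docker_storage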
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 1bee460e8..d0921b9d3 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,11 +1,13 @@
---
-- name: Run OpenShift health checks
- hosts: OSEv3
+- name: OpenShift Health Checks
+ hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- - r_openshift_health_checker_playbook_context: "health"
+ - r_openshift_health_checker_playbook_context: health
post_tasks:
- - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513
+ - name: Run health checks (@health)
+ action: openshift_health_check
args:
checks: ['@health']
diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/common/openshift-checks/install.yml
new file mode 100644
index 000000000..6701a2e15
--- /dev/null
+++ b/playbooks/common/openshift-checks/install.yml
@@ -0,0 +1,47 @@
+---
+- name: Health Check Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'In Progress'
+ set_stats:
+ data:
+ installer_phase_health: "In Progress"
+ aggregate: false
+
+- name: OpenShift Health Checks
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: install
+ post_tasks:
+ - name: Run health checks (install) - EL
+ when: ansible_distribution != "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
+ - name: Run health checks (install) - Fedora
+ when: ansible_distribution == "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - docker_image_availability
+
+- name: Health Check Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'Complete'
+ set_stats:
+ data:
+ installer_phase_health: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index e01c6f38d..32449d4e4 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,11 +1,13 @@
---
-- hosts: OSEv3
- name: run OpenShift pre-install checks
+- name: OpenShift Health Checks
+ hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- - r_openshift_health_checker_playbook_context: "pre-install"
+ - r_openshift_health_checker_playbook_context: pre-install
post_tasks:
- - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513
+ - name: Run health checks (@preflight)
+ action: openshift_health_check
args:
checks: ['@preflight']
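Checks can be requested individually by name (as install.yml above does) or as a tag group prefixed with @ (as with @preflight here). A sketch mixing both forms, assuming the same openshift_health_checker role:

---
# Sketch: a check list may combine a tag group with individually named checks.
- name: Run mixed health checks (sketch)
  hosts: oo_all_hosts
  roles:
    - openshift_health_checker
  post_tasks:
    - name: Run preflight group plus one named check
      action: openshift_health_check
      args:
        checks:
          - '@preflight'
          - docker_storage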
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
deleted file mode 100644
index c0ea93d2c..000000000
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Additional master configuration
- hosts: oo_first_master
- vars:
- cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
- etcd_urls: "{{ openshift.master.etcd_urls }}"
- openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
- omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
- roles:
- - role: openshift_master_cluster
- when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- - role: openshift_examples
- registry_url: "{{ openshift.master.registry_url }}"
- when: openshift.common.install_examples | bool
- - role: openshift_hosted_templates
- registry_url: "{{ openshift.master.registry_url }}"
- - role: openshift_manageiq
- when: openshift.common.use_manageiq | bool
- - role: cockpit
- when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
- (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
- - role: flannel_register
- when: openshift.common.use_flannel | bool
diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml
new file mode 100644
index 000000000..5ddafdb07
--- /dev/null
+++ b/playbooks/common/openshift-cluster/cockpit-ui.yml
@@ -0,0 +1,6 @@
+---
+- name: Create Hosted Resources - cockpit-ui
+ hosts: oo_first_master
+ roles:
+ - role: cockpit-ui
+ when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 1482b3a3f..244787985 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,56 +1,41 @@
---
-- include: initialize_oo_option_facts.yml
- tags:
- - always
-
-- name: Disable excluders
- hosts: oo_masters_to_config:oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+- include: ../openshift-checks/install.yml
- include: ../openshift-etcd/config.yml
- tags:
- - etcd
- include: ../openshift-nfs/config.yml
- tags:
- - nfs
+ when: groups.oo_nfs_to_config | default([]) | count > 0
- include: ../openshift-loadbalancer/config.yml
- tags:
- - loadbalancer
+ when: groups.oo_lb_to_config | default([]) | count > 0
- include: ../openshift-master/config.yml
- tags:
- - master
-- include: additional_config.yml
- tags:
- - master
+- include: ../openshift-master/additional_config.yml
- include: ../openshift-node/config.yml
- tags:
- - node
- include: ../openshift-glusterfs/config.yml
- tags:
- - glusterfs
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
- include: openshift_hosted.yml
- tags:
- - hosted
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config:oo_nodes_to_config
- tags:
- - always
+- include: openshift_metrics.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- include: openshift_logging.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- include: service_catalog.yml
+ when: openshift_enable_service_catalog | default(false) | bool
+
+- include: ../openshift-management/config.yml
+ when: openshift_management_install_management | default(false) | bool
+
+- name: Print deprecated variable warning message if necessary
+ hosts: oo_first_master
gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ tasks:
+ - debug: msg="{{__deprecation_message}}"
+ when:
+ - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
new file mode 100644
index 000000000..ec6f2c52c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
@@ -0,0 +1,18 @@
+---
+- name: Create persistent volumes
+ hosts: oo_first_master
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ tasks:
+ - debug: var=persistent_volumes
+ - debug: var=persistent_volume_claims
+
+- name: Create Hosted Resources - persistent volumes
+ hosts: oo_first_master
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 5425f448f..be14b06f0 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -27,9 +27,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: master
local_facts:
dns_port: '8053'
@@ -37,7 +34,7 @@
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: dnsConfig.bindAddress
yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}"
- notify: restart master
+ notify: restart master api
- meta: flush_handlers
- name: Configure nodes for dnsmasq
@@ -50,9 +47,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: node
local_facts:
dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c28ce4c14..78b552279 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -5,20 +5,20 @@
become: no
gather_facts: no
tasks:
- - name: Evaluate groups - g_etcd_hosts required
+ - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required
fail:
- msg: This playbook requires g_etcd_hosts to be set
- when: g_etcd_hosts is not defined
+ msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set
+ when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined
- name: Evaluate groups - g_master_hosts or g_new_master_hosts required
fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: g_master_hosts is not defined or g_new_master_hosts is not defined
+ when: g_master_hosts is not defined and g_new_master_hosts is not defined
- name: Evaluate groups - g_node_hosts or g_new_node_hosts required
fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: g_node_hosts is not defined or g_new_node_hosts is not defined
+ when: g_node_hosts is not defined and g_new_node_hosts is not defined
- name: Evaluate groups - g_lb_hosts required
fail:
@@ -33,13 +33,26 @@
- name: Evaluate groups - g_nfs_hosts is single host
fail:
msg: The nfs group must be limited to one host
- when: (groups[g_nfs_hosts] | default([])) | length > 1
+ when: g_nfs_hosts | default([]) | length > 1
- name: Evaluate groups - g_glusterfs_hosts required
fail:
msg: This playbook requires g_glusterfs_hosts to be set
when: g_glusterfs_hosts is not defined
+ - name: Evaluate groups - Fail if no etcd hosts group is defined
+ fail:
+ msg: >
+ Running etcd as an embedded service is no longer supported. If this is a
+ new install, please define an 'etcd' group with either one or three
+ hosts. These hosts may be the same hosts as your masters. If this is an
+ upgrade, you may set openshift_master_unsupported_embedded_etcd=true
+ until a migration playbook becomes available.
+ when:
+ - g_etcd_hosts | default([]) | length not in [3,1]
+ - not openshift_master_unsupported_embedded_etcd | default(False)
+ - not (openshift_node_bootstrap | default(False))
+
- name: Evaluate oo_all_hosts
add_host:
name: "{{ item }}"
@@ -67,6 +80,15 @@
when: g_master_hosts|length > 0
changed_when: no
+ - name: Evaluate oo_new_etcd_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_new_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_new_etcd_hosts | default([]) }}"
+ changed_when: no
+
- name: Evaluate oo_masters_to_config
add_host:
name: "{{ item }}"
@@ -108,7 +130,7 @@
add_host:
name: "{{ item }}"
groups: oo_etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
changed_when: False
- name: Evaluate oo_nodes_to_config
@@ -157,3 +179,12 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
changed_when: no
+
+ - name: Evaluate oo_etcd_to_migrate
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_migrate
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
+ changed_when: no
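The new etcd-group check above requires one or three etcd hosts, which may be colocated with masters. A hypothetical inventory snippet, in YAML form, that satisfies it:

---
# Hypothetical inventory: a single etcd host colocated with the first master.
all:
  children:
    masters:
      hosts:
        master1.example.com:
    etcd:
      hosts:
        master1.example.com:
    nodes:
      hosts:
        master1.example.com:
        node1.example.com: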
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 9cebecd68..91223d368 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -6,12 +6,164 @@
- name: Initialize host facts
hosts: oo_all_hosts
- roles:
- - openshift_facts
tasks:
- - openshift_facts:
+ - name: load openshift_facts module
+ include_role:
+ name: openshift_facts
+ static: yes
+
+ # TODO: Should this role be refactored into health_checks??
+ - name: Run openshift_sanitize_inventory to set variables
+ include_role:
+ name: openshift_sanitize_inventory
+
+ - name: Detecting Operating System from ostree_booted
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
+
+ # Locally setup containerized facts for now
+ - name: initialize_facts set fact l_is_atomic
+ set_fact:
+ l_is_atomic: "{{ ostree_booted.stat.exists }}"
+
+ - name: initialize_facts set fact for containerized and l_is_*_system_container
+ set_fact:
+ l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+ l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
+ - name: initialize_facts set facts for l_any_system_container
+ set_fact:
+ l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
+
+ - name: initialize_facts set fact for l_etcd_runtime
+ set_fact:
+ l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ - name: Validate python version - ans_dist is fedora and python is v3
+ fail:
+ msg: |
+ openshift-ansible requires Python 3 for {{ ansible_distribution }};
+ For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
+ when:
+ - ansible_distribution == 'Fedora'
+ - ansible_python['version']['major'] != 3
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ - name: Validate python version - ans_dist not Fedora and python must be v2
+ fail:
+ msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
+ when:
+ - ansible_distribution != 'Fedora'
+ - ansible_python['version']['major'] != 2
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ # Fail as early as possible if Atomic and old version of Docker
+ - when:
+ - l_is_atomic | bool
+ block:
+
+ # See https://access.redhat.com/articles/2317361
+ # and https://github.com/ansible/ansible/issues/15892
+ # NOTE: the "'s can not be removed at this level else the docker command will fail
+ # NOTE: When ansible >2.2.1.x is used this can be updated per
+ # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
+ - name: Determine Atomic Host Docker Version
+ shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
+ register: l_atomic_docker_version
+
+ - name: assert atomic host docker version is 1.12 or later
+ assert:
+ that:
+ - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+ msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
+
+ - when:
+ - not l_is_atomic | bool
+ block:
+ - name: Ensure openshift-ansible installer package deps are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - iproute
+ - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
+ - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+ - yum-utils
+
+ - name: Ensure various deps for running system containers are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - atomic
+ - ostree
+ - runc
+ when:
+ - l_any_system_container | bool
+
+ - name: Default system_images_registry to an enterprise registry
+ set_fact:
+ system_images_registry: "registry.access.redhat.com"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "openshift-enterprise"
+
+ - name: Default system_images_registry to community registry
+ set_fact:
+ system_images_registry: "docker.io"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "origin"
+
+ - name: Gather Cluster facts and set is_containerized if needed
+ openshift_facts:
role: common
local_facts:
+ deployment_type: "{{ openshift_deployment_type }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ cli_image: "{{ osm_image | default(None) }}"
hostname: "{{ openshift_hostname | default(None) }}"
- - set_fact:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ is_containerized: "{{ l_is_containerized | default(None) }}"
+ is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
+ is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
+ is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
+ is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+ etcd_runtime: "{{ l_etcd_runtime }}"
+ system_images_registry: "{{ system_images_registry }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+
+ - name: Set fact of no_proxy_internal_hostnames
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+ - name: initialize_facts set_fact repoquery command
+ set_fact:
+ repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+ - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
+ set_fact:
+ openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
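The CURLY shell variable in the Atomic Host Docker version check above exists only to stop Ansible's Jinja2 templating from consuming the Go-template braces that docker version --format expects. An equivalent sketch using a Jinja2 raw block instead:

---
# Sketch: emit the literal Go-template braces with {% raw %} rather than the
# CURLY shell-variable workaround; behavior is otherwise the same.
- name: Determine Atomic Host Docker Version (sketch)
  shell: docker version --format '{% raw %}{{json .Server.Version}}{% endraw %}'
  register: l_atomic_docker_version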
diff --git a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
deleted file mode 100644
index ac3c702a0..000000000
--- a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Set oo_option facts
- hosts: oo_all_hosts
- tags:
- - always
- tasks:
- - set_fact:
- openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
- when: openshift_docker_additional_registries is not defined
- - set_fact:
- openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
- when: openshift_docker_insecure_registries is not defined
- - set_fact:
- openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
- when: openshift_docker_blocked_registries is not defined
- - set_fact:
- openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
- when: openshift_docker_options is not defined
- - set_fact:
- openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
- when: openshift_docker_log_driver is not defined
- - set_fact:
- openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
- when: openshift_docker_log_options is not defined
- - set_fact:
- openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}"
- when: openshift_docker_selinux_enabled is not defined
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
new file mode 100644
index 000000000..a7114fc80
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
@@ -0,0 +1,8 @@
+---
+- name: Setup yum repositories for all hosts
+ hosts: oo_all_hosts
+ gather_facts: no
+ tasks:
+ - name: initialize openshift repos
+ include_role:
+ name: openshift_repos
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index f4e52869e..37a5284d5 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,24 +1,5 @@
---
# NOTE: requires openshift_facts be run
-- name: Verify compatible yum/subscription-manager combination
- hosts: oo_all_hosts
- gather_facts: no
- tasks:
- # See:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1395047
- # https://bugzilla.redhat.com/show_bug.cgi?id=1282961
- # https://github.com/openshift/openshift-ansible/issues/1138
- # Consider the repoquery module for this work
- - name: Check for bad combinations of yum and subscription-manager
- command: >
- {{ repoquery_cmd }} --installed --qf '%{version}' "yum"
- register: yum_ver_test
- changed_when: false
- when: not openshift.common.is_atomic | bool
- - fail:
- msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
- when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout
-
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
roles:
@@ -27,9 +8,14 @@
# NOTE: We set this even on etcd hosts as they may also later run as masters,
# and we don't want to install wrong version of docker and have to downgrade
# later.
-- name: Set openshift_version for all hosts
- hosts: oo_all_hosts:!oo_first_master
+- name: Set openshift_version for etcd, node, and master hosts
+ hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
vars:
openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+ pre_tasks:
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
+ when: openshift_pkg_version is not defined
+ - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
roles:
- openshift_version
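The pre_tasks above derive openshift_pkg_version by prefixing the resolved version with a dash, so roles can append it directly to a package name when building a yum spec. A hedged example of pinning it by hand in the inventory:

---
# Hypothetical inventory variable: the leading dash lets roles construct a
# yum spec such as "<service_type>-3.7.0" from package name plus version.
openshift_pkg_version: "-3.7.0"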
diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
new file mode 100644
index 000000000..62fe0dd60
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
@@ -0,0 +1,6 @@
+---
+- name: Create Hosted Resources - openshift_default_storage_class
+ hosts: oo_first_master
+ roles:
+ - role: openshift_default_storage_class
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 8d94b6509..c1536eb36 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,72 +1,35 @@
---
-- name: Create persistent volumes
- hosts: oo_first_master
- tags:
- - hosted
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- roles:
- - role: openshift_persistent_volumes
- when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
+- name: Hosted Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Hosted install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_hosted: "In Progress"
+ aggregate: false
-- name: Create Hosted Resources
- hosts: oo_first_master
- tags:
- - hosted
- pre_tasks:
- - set_fact:
- openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - set_fact:
- logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}"
- logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
- logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
- roles:
- - role: openshift_hosted
- - role: openshift_metrics
- when: openshift_hosted_metrics_deploy | default(false) | bool
- - role: openshift_logging
- when: openshift_hosted_logging_deploy | default(false) | bool
- openshift_hosted_logging_hostname: "{{ logging_hostname }}"
- openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"
- openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"
- openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
- openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
- openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_hosted_loggingops_storage_kind | default(none) =='dynamic' else '' }}"
+- include: create_persistent_volumes.yml
- - role: cockpit-ui
- when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
- - role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
+- include: openshift_default_storage_class.yml
-- name: Update master-config for publicLoggingURL
- hosts: oo_masters_to_config:!oo_first_master
- tags:
- - hosted
- pre_tasks:
- - set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- - set_fact:
- openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- tasks:
+- include: openshift_hosted_create_projects.yml
+
+- include: openshift_hosted_router.yml
- - block:
- - include_role:
- name: openshift_logging
- tasks_from: update_master_config
- when: openshift_hosted_logging_deploy | default(false) | bool
+- include: openshift_hosted_registry.yml
- - block:
- - include_role:
- name: openshift_metrics
- tasks_from: update_master_config
- when: openshift_hosted_metrics_deploy | default(false) | bool
+- include: cockpit-ui.yml
+
+- include: openshift_prometheus.yml
+ when: openshift_hosted_prometheus_deploy | default(False) | bool
+
+- name: Hosted Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Hosted install 'Complete'
+ set_stats:
+ data:
+ installer_phase_hosted: "Complete"
+ aggregate: false
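Splitting the hosted install into per-component includes means a single component can be re-run without the rest. A hypothetical wrapper that redeploys only the router, assuming the standard initialization has already run:

---
# Hypothetical wrapper: run only the hosted router component; std_include.yml
# is assumed to perform the usual group evaluation and fact initialization.
- include: std_include.yml
- include: openshift_hosted_router.yml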
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
new file mode 100644
index 000000000..d5ca5185c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
@@ -0,0 +1,7 @@
+---
+- name: Create Hosted Resources - openshift projects
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: openshift_hosted
+ tasks_from: create_projects.yml
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
new file mode 100644
index 000000000..2a91a827c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
@@ -0,0 +1,13 @@
+---
+- name: Create Hosted Resources - registry
+ hosts: oo_first_master
+ tasks:
+ - set_fact:
+ openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ - include_role:
+ name: openshift_hosted
+ tasks_from: registry.yml
+ when:
+ - openshift_hosted_manage_registry | default(True) | bool
+ - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_router.yml b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
new file mode 100644
index 000000000..bcb5a34a4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
@@ -0,0 +1,13 @@
+---
+- name: Create Hosted Resources - router
+ hosts: oo_first_master
+ tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ - include_role:
+ name: openshift_hosted
+ tasks_from: router.yml
+ when:
+ - openshift_hosted_manage_router | default(True) | bool
+ - openshift_hosted_router_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 57580406c..529a4c939 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,5 +1,13 @@
---
-- include: evaluate_groups.yml
+- name: Logging Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Logging install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_logging: "In Progress"
+ aggregate: false
- name: OpenShift Aggregated Logging
hosts: oo_first_master
@@ -13,4 +21,13 @@
- include_role:
name: openshift_logging
tasks_from: update_master_config
- when: openshift_logging_install_logging | default(false) | bool
+
+- name: Logging Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Logging install 'Complete'
+ set_stats:
+ data:
+ installer_phase_logging: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index bcff4a1a1..9c0bd489b 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,7 +1,34 @@
---
-- include: evaluate_groups.yml
+- name: Metrics Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Metrics install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_metrics: "In Progress"
+ aggregate: false
- name: OpenShift Metrics
hosts: oo_first_master
roles:
- - openshift_metrics
+ - role: openshift_metrics
+
+- name: OpenShift Metrics
+ hosts: oo_masters:!oo_first_master
+ serial: 1
+ tasks:
+ - name: Setup the non-first masters configs
+ include_role:
+ name: openshift_metrics
+ tasks_from: update_master_config.yaml
+
+- name: Metrics Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Metrics install 'Complete'
+ set_stats:
+ data:
+ installer_phase_metrics: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
new file mode 100644
index 000000000..a73b294a5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -0,0 +1,25 @@
+---
+- name: Prometheus Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Prometheus install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_prometheus: "In Progress"
+ aggregate: false
+
+- name: Create Hosted Resources - openshift_prometheus
+ hosts: oo_first_master
+ roles:
+ - role: openshift_prometheus
+
+- name: Prometheus Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Prometheus install 'Complete'
+ set_stats:
+ data:
+ installer_phase_prometheus: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
new file mode 100644
index 000000000..4a9fbf7eb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
@@ -0,0 +1,12 @@
+---
+- name: Check cert expiry
+ hosts: "{{ g_check_expiry_hosts }}"
+ vars:
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
new file mode 100644
index 000000000..d738c8207
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
@@ -0,0 +1,19 @@
+---
+- name: Backup and remove generated etcd certificates
+ hosts: oo_first_etcd
+ any_errors_fatal: true
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_generated_certificates
+ - include_role:
+ name: etcd
+ tasks_from: remove_generated_certificates
+
+- name: Backup deployed etcd certificates
+ hosts: oo_etcd_to_config
+ any_errors_fatal: true
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_server_certificates
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
index 6964e8567..044875d1c 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
@@ -13,34 +13,15 @@
- name: Backup existing etcd CA certificate directories
hosts: oo_etcd_to_config
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
tasks:
- - name: Determine if CA certificate directory exists
- stat:
- path: "{{ etcd_ca_dir }}"
- register: etcd_ca_certs_dir_stat
- - name: Backup generated etcd certificates
- command: >
- tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_ca_dir }}
- args:
- warn: no
- when: etcd_ca_certs_dir_stat.stat.exists | bool
- - name: Remove CA certificate directory
- file:
- path: "{{ etcd_ca_dir }}"
- state: absent
- when: etcd_ca_certs_dir_stat.stat.exists | bool
+ - include_role:
+ name: etcd
+ tasks_from: backup_ca_certificates
+ - include_role:
+ name: etcd
+ tasks_from: remove_ca_certificates
-- name: Generate new etcd CA
- hosts: oo_first_etcd
- roles:
- - role: openshift_etcd_ca
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+- include: ../../openshift-etcd/ca.yml
- name: Create temp directory for syncing certs
hosts: localhost
@@ -55,52 +36,14 @@
- name: Distribute etcd CA to etcd hosts
hosts: oo_etcd_to_config
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
tasks:
- - name: Create a tarball of the etcd ca certs
- command: >
- tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
- -C {{ etcd_ca_dir }} .
- args:
- creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- warn: no
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Retrieve etcd ca cert tarball
- fetch:
- src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Ensure ca directory exists
- file:
- path: "{{ etcd_ca_dir }}"
- state: directory
- - name: Unarchive etcd ca cert tarballs
- unarchive:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- - name: Read current etcd CA
- slurp:
- src: "{{ etcd_conf_dir }}/ca.crt"
- register: g_current_etcd_ca_output
- - name: Read new etcd CA
- slurp:
- src: "{{ etcd_ca_dir }}/ca.crt"
- register: g_new_etcd_ca_output
- - copy:
- content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
- dest: "{{ item }}/ca.crt"
- with_items:
- - "{{ etcd_conf_dir }}"
- - "{{ etcd_ca_dir }}"
+ - include_role:
+ name: etcd
+ tasks_from: distribute_ca
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- include: ../../openshift-etcd/restart.yml
# Do not restart etcd when etcd certificates were previously expired.
@@ -111,17 +54,13 @@
- name: Retrieve etcd CA certificate
hosts: oo_first_etcd
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
tasks:
- - name: Retrieve etcd CA certificate
- fetch:
- src: "{{ etcd_conf_dir }}/ca.crt"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
+ - include_role:
+ name: etcd
+ tasks_from: retrieve_ca_certificates
+ vars:
+ etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Distribute etcd CA to masters
hosts: oo_masters_to_config
@@ -146,13 +85,19 @@
changed_when: false
- include: ../../openshift-master/restart.yml
- # Do not restart masters when master certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # Do not restart masters when master or etcd certificates were previously expired.
+ when:
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
deleted file mode 100644
index 6b5c805e6..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-- name: Backup and remove generated etcd certificates
- hosts: oo_first_etcd
- any_errors_fatal: true
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- post_tasks:
- - name: Determine if generated etcd certificates exist
- stat:
- path: "{{ etcd_conf_dir }}/generated_certs"
- register: etcd_generated_certs_dir_stat
- - name: Backup generated etcd certificates
- command: >
- tar -czf {{ etcd_conf_dir }}/etcd-generated-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_conf_dir }}/generated_certs
- args:
- warn: no
- when: etcd_generated_certs_dir_stat.stat.exists | bool
- - name: Remove generated etcd certificates
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - "{{ etcd_conf_dir }}/generated_certs"
-
-- name: Backup and remove deployed etcd certificates
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- post_tasks:
- - name: Backup etcd certificates
- command: >
- tar -czvf /etc/etcd/etcd-server-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_conf_dir }}/ca.crt
- {{ etcd_conf_dir }}/server.crt
- {{ etcd_conf_dir }}/server.key
- {{ etcd_conf_dir }}/peer.crt
- {{ etcd_conf_dir }}/peer.key
- args:
- warn: no
-
-- name: Redeploy etcd certificates
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_server_certificates
- etcd_certificates_redeploy: true
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
-
-- name: Redeploy etcd client certificates for masters
- hosts: oo_masters_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_client_certificates
- etcd_certificates_redeploy: true
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
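# A minimal sketch, not part of this diff: the deleted plays above relied on a
# timestamped-tar pattern for certificate backups. Reduced to one standalone
# task (paths illustrative; ansible_date_time requires gathered facts):
- name: Back up an etcd certificate directory with an epoch suffix
  command: >
    tar -czf /etc/etcd/etcd-cert-backup-{{ ansible_date_time.epoch }}.tgz
    /etc/etcd/generated_certs
  args:
    warn: no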
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
index c30889d64..4dbc041b0 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
@@ -1,5 +1,5 @@
---
-- name: Redeploy master certificates
+- name: Backup and remove master certificates
hosts: oo_masters_to_config
any_errors_fatal: true
vars:
@@ -7,12 +7,12 @@
openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
pre_tasks:
- stat:
- path: "{{ openshift_generated_configs_dir }}"
+ path: "{{ openshift.common.config_base }}/generated-configs"
register: openshift_generated_configs_dir_stat
- name: Backup generated certificate and config directories
command: >
tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz
- {{ openshift_generated_configs_dir }}
+ {{ openshift.common.config_base }}/generated-configs
{{ openshift.common.config_base }}/master
when: openshift_generated_configs_dir_stat.stat.exists
delegate_to: "{{ openshift_ca_host }}"
@@ -22,7 +22,7 @@
path: "{{ item }}"
state: absent
with_items:
- - "{{ openshift_generated_configs_dir }}"
+ - "{{ openshift.common.config_base }}/generated-configs"
- name: Remove generated certificates
file:
path: "{{ openshift.common.config_base }}/master/{{ item }}"
@@ -36,18 +36,3 @@
- "openshift-master.crt"
- "openshift-master.key"
- "openshift-master.kubeconfig"
- - name: Remove generated etcd client certificates
- file:
- path: "{{ openshift.common.config_base }}/master/{{ item }}"
- state: absent
- with_items:
- - "master.etcd-client.crt"
- - "master.etcd-client.key"
- when: groups.oo_etcd_to_config | default([]) | length == 0
- roles:
- - role: openshift_master_certificates
- openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
- | oo_collect('openshift.common.hostname')
- | default(none, true) }}"
- openshift_certificates_redeploy: true
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/nodes.yml b/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
index 4990a03f2..2ad84b3b9 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/nodes.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
@@ -22,8 +22,3 @@
state: absent
with_items:
- "{{ openshift.common.config_base }}/node/ca.crt"
- roles:
- - role: openshift_node_certificates
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_certificates_redeploy: true
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 089ae6bbc..2068ed199 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -7,7 +7,7 @@
when: not openshift.common.version_gte_3_2_or_1_2 | bool
- name: Check cert expiries
- hosts: oo_nodes_to_config:oo_masters_to_config
+ hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config
vars:
openshift_certificate_expiry_show_all: yes
roles:
@@ -44,8 +44,8 @@
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: servingInfo.clientCA
- yaml_value: ca-bundle.crt
- when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca-bundle.crt'
+ yaml_value: ca.crt
+ when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: etcdClientInfo.ca
@@ -105,25 +105,27 @@
- "ca.serial.txt"
- "ca-bundle.crt"
-- name: Generate new OpenShift CA certificate
+- name: Create temporary directory for creating new CA certificate
hosts: oo_first_master
- pre_tasks:
+ tasks:
- name: Create temporary directory for creating new CA certificate
command: >
mktemp -d /tmp/openshift-ansible-XXXXXXX
register: g_new_openshift_ca_mktemp
changed_when: false
- roles:
- - role: openshift_ca
+
+- name: Create OpenShift CA
+ hosts: oo_first_master
+ vars:
# Set openshift_ca_config_dir to a temporary directory where CA
# will be created. We'll replace the existing CA with the CA
# created in the temporary directory.
- openshift_ca_config_dir: "{{ g_new_openshift_ca_mktemp.stdout }}"
+ openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}"
+ roles:
+ - role: openshift_master_facts
+ - role: openshift_named_certificates
+ - role: openshift_ca
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_master_hostnames: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | default([]))
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
- name: Create temp directory for syncing certs
hosts: localhost
@@ -209,16 +211,22 @@
with_items: "{{ client_users }}"
- include: ../../openshift-master/restart.yml
- # Do not restart masters when master certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # Do not restart masters when master or etcd certificates were previously expired.
+ when:
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -268,13 +276,28 @@
changed_when: false
- include: ../../openshift-node/restart.yml
- # Do not restart nodes when node certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
+ # Do not restart nodes when node, master or etcd certificates were previously expired.
+ when:
+ # nodes
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index 8c8062585..afd5463b2 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -66,6 +66,7 @@
--signer-cert={{ openshift.common.config_base }}/master/ca.crt
--signer-key={{ openshift.common.config_base }}/master/ca.key
--signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
+ --config={{ mktemp.stdout }}/admin.kubeconfig
--hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift.common.config_base }}/master/registry.crt
--key={{ openshift.common.config_base }}/master/registry.key
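# Editorial note, not part of this diff: the added --config flag points the
# certificate generation at the temporary admin kubeconfig created earlier in
# this play. Judging by the surrounding flags this is presumably
# `oc adm ca create-server-cert`; a hand-run equivalent (hostnames
# abbreviated, paths assumed) would look roughly like:
- name: Generate registry server certificate (illustration only)
  command: >
    oc adm ca create-server-cert
    --signer-cert=/etc/origin/master/ca.crt
    --signer-key=/etc/origin/master/ca.key
    --signer-serial=/etc/origin/master/ca.serial.txt
    --config=/tmp/openshift-ansible-XXXXXX/admin.kubeconfig
    --hostnames="docker-registry.default.svc"
    --cert=/etc/origin/master/registry.crt
    --key=/etc/origin/master/registry.key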
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index 9f14f2d69..2116c745c 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -7,23 +7,34 @@
tasks:
- name: Create temp directory for kubeconfig
command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
+ register: router_cert_redeploy_tempdir
changed_when: false
+
- name: Copy admin client config(s)
command: >
- cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
changed_when: false
- name: Determine if router exists
command: >
{{ openshift.common.client_binary }} get dc/router -o json
- --config={{ mktemp.stdout }}/admin.kubeconfig
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
register: l_router_dc
failed_when: false
changed_when: false
- - set_fact:
+ - name: Determine if router service exists
+ command: >
+ {{ openshift.common.client_binary }} get svc/router -o json
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
+ -n default
+ register: l_router_svc
+ failed_when: false
+ changed_when: false
+
+ - name: Collect router environment variables and secrets
+ set_fact:
router_env_vars: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env']
| oo_collect('name'))
| default([]) }}"
@@ -34,20 +45,32 @@
changed_when: false
when: l_router_dc.rc == 0
+ - name: Collect router service annotations
+ set_fact:
+ router_service_annotations: "{{ (l_router_svc.stdout | from_json)['metadata']['annotations'] if 'annotations' in (l_router_svc.stdout | from_json)['metadata'] else [] }}"
+ when: l_router_svc.rc == 0
+
- name: Update router environment variables
shell: >
{{ openshift.common.client_binary }} env dc/router
OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)"
OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)"
- --config={{ mktemp.stdout }}/admin.kubeconfig
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
- when: l_router_dc.rc == 0 and 'OPENSHIFT_CA_DATA' in router_env_vars and 'OPENSHIFT_CERT_DATA' in router_env_vars and 'OPENSHIFT_KEY_DATA' in router_env_vars
+ when:
+ - l_router_dc.rc == 0
+ - ('OPENSHIFT_CA_DATA' in router_env_vars)
+ - ('OPENSHIFT_CERT_DATA' in router_env_vars)
+ - ('OPENSHIFT_KEY_DATA' in router_env_vars)
+ # When the router service contains service signer annotations we
+ # will delete the existing certificate secret and allow OpenShift to
+ # replace the secret.
- block:
- name: Delete existing router certificate secret
oc_secret:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ kubeconfig: "{{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig"
name: router-certs
namespace: default
state: absent
@@ -58,85 +81,61 @@
{{ openshift.common.client_binary }} annotate service/router
service.alpha.openshift.io/serving-cert-secret-name-
service.alpha.openshift.io/serving-cert-signed-by-
- --config={{ mktemp.stdout }}/admin.kubeconfig
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
- name: Add serving-cert-secret annotation to router service
command: >
{{ openshift.common.client_binary }} annotate service/router
service.alpha.openshift.io/serving-cert-secret-name=router-certs
- --config={{ mktemp.stdout }}/admin.kubeconfig
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
- when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined
+ when:
+ - l_router_dc.rc == 0
+ - l_router_svc.rc == 0
+ - ('router-certs' in router_secrets)
+ - openshift_hosted_router_certificate is undefined
+ - ('service.alpha.openshift.io/serving-cert-secret-name') in router_service_annotations
+ - ('service.alpha.openshift.io/serving-cert-signed-by') in router_service_annotations
- - block:
- - assert:
- that:
- - "'certfile' in openshift_hosted_router_certificate"
- - "'keyfile' in openshift_hosted_router_certificate"
- - "'cafile' in openshift_hosted_router_certificate"
- msg: |-
- openshift_hosted_router_certificate has been set in the inventory but is
- missing one or more required keys. Ensure that 'certfile', 'keyfile',
- and 'cafile' keys have been specified for the openshift_hosted_router_certificate
- inventory variable.
-
- - name: Read router certificate and key
- become: no
- local_action:
- module: slurp
- src: "{{ item }}"
- register: openshift_router_certificate_output
- # Defaulting dictionary keys to none to avoid deprecation warnings
- # (future fatal errors) during template evaluation. Dictionary keys
- # won't be accessed unless openshift_hosted_router_certificate is
- # defined and has all keys (certfile, keyfile, cafile) which we
- # check above.
- with_items:
- - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}"
- - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}"
- - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}"
-
- - name: Write temporary router certificate file
- copy:
- content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}"
- dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
- mode: 0600
-
- - name: Write temporary router key file
- copy:
- content: "{{ (openshift_router_certificate_output.results
- | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}"
- dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
- mode: 0600
-
- - name: Replace router-certs secret
- shell: >
- {{ openshift.common.client_binary }} secrets new router-certs
- tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
- tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
- --type=kubernetes.io/tls
- --confirm
- -o json | {{ openshift.common.client_binary }} replace -f -
+ # When there are no annotations on the router service we will allow
+ # the openshift_hosted role to either create a new wildcard
+ # certificate (since we deleted the original) or reapply a custom
+ # openshift_hosted_router_certificate.
+ - file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/origin/master/openshift-router.crt
+ - /etc/origin/master/openshift-router.key
+ when:
+ - l_router_dc.rc == 0
+ - l_router_svc.rc == 0
+ - ('router-certs' in router_secrets)
+ - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations
+ - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations
- - name: Remove temporary router certificate and key files
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
- - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
- when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined
+ - include_role:
+ name: openshift_hosted
+ tasks_from: main
+ vars:
+ openshift_hosted_manage_registry: false
+ when:
+ - l_router_dc.rc == 0
+ - l_router_svc.rc == 0
+ - ('router-certs' in router_secrets)
+ - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations
+ - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations
- name: Redeploy router
command: >
{{ openshift.common.client_binary }} deploy dc/router
--latest
- --config={{ mktemp.stdout }}/admin.kubeconfig
+ --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
- name: Delete temp directory
file:
- name: "{{ mktemp.stdout }}"
+ name: "{{ router_cert_redeploy_tempdir.stdout }}"
state: absent
changed_when: False
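# A minimal sketch, not part of this diff: the two serving-cert annotations
# this play keys off of can be inspected directly (assumes the router lives
# in the default namespace):
- name: Show router service annotations (illustration only)
  command: oc get svc/router -n default -o jsonpath='{.metadata.annotations}'
  register: router_annotations_preview
  changed_when: false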
diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml
new file mode 100644
index 000000000..26716a92d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/sanity_checks.yml
@@ -0,0 +1,51 @@
+---
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ tasks:
+ - fail:
+ msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+ msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Nuage sdn can not be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with nuage
+ when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
+
+ - fail:
+    msg: The Calico playbook does not yet integrate with the Flannel playbook in OpenShift. Set either openshift_use_calico or openshift_use_flannel, but not both.
+ when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+    msg: Calico can not be used with Nuage in OpenShift. Set either openshift_use_calico or openshift_use_nuage, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+    msg: Calico can not be used with Contiv in OpenShift. Set either openshift_use_calico or openshift_use_contiv, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: openshift_hostname must be 63 characters or less
+ when: openshift_hostname is defined and openshift_hostname | length > 63
+
+ - fail:
+ msg: openshift_public_hostname must be 63 characters or less
+ when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
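# A minimal sketch, not part of this diff: a group_vars fragment that would
# trip the first check above, since openshift_use_openshift_sdn defaults to
# true when unset (file name illustrative):
#
# group_vars/OSEv3.yml
openshift_use_flannel: true
# sanity_checks.yml then fails with "Flannel can not be used with openshift
# sdn, set openshift_use_openshift_sdn=false if you want to use flannel".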
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
new file mode 100644
index 000000000..bd964b2ce
--- /dev/null
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -0,0 +1,29 @@
+---
+- name: Service Catalog Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Service Catalog install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_servicecatalog: "In Progress"
+ aggregate: false
+
+- name: Service Catalog
+ hosts: oo_first_master
+ roles:
+ - openshift_service_catalog
+ - ansible_service_broker
+ - template_service_broker
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Service Catalog Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Service Catalog install 'Complete'
+ set_stats:
+ data:
+ installer_phase_servicecatalog: "Complete"
+ aggregate: false
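# Editorial note, not part of this diff: set_stats with aggregate: false
# replaces the stat rather than merging into it, so each phase marker simply
# flips from "In Progress" to "Complete". The installer_checkpoint role
# (loaded at Initialization Checkpoint Start in std_include.yml below) is
# what surfaces these custom stats as an end-of-run phase summary.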
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 6ed31a644..45b34c8bd 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,4 +1,16 @@
---
+- name: Initialization Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ roles:
+ - installer_checkpoint
+ tasks:
+ - name: Set install initialization 'In Progress'
+ set_stats:
+ data:
+ installer_phase_initialize: "In Progress"
+ aggregate: false
+
- include: evaluate_groups.yml
tags:
- always
@@ -7,10 +19,28 @@
tags:
- always
+- include: sanity_checks.yml
+ tags:
+ - always
+
- include: validate_hostnames.yml
tags:
- node
+- include: initialize_openshift_repos.yml
+ tags:
+ - always
+
- include: initialize_openshift_version.yml
tags:
- always
+
+- name: Initialization Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set install initialization 'Complete'
+ set_stats:
+ data:
+ installer_phase_initialize: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..eb118365a 100644
--- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate etcd instance name(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: etcd_names_output
with_sequence: count={{ num_etcd }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..783f70f50 100644
--- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate master instance name(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: master_names_output
with_sequence: count={{ num_masters }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..c103e40a9 100644
--- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
@@ -5,7 +5,7 @@
- name: Generate node instance name(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
register: node_names_output
with_sequence: count={{ number_nodes }}
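# Editorial note, not part of this diff: '%05x' | format(1048576 | random)
# renders a random value in 0..0xfffff as a zero-padded five-digit hex
# suffix, so a generated node name looks like (values illustrative):
#   default-node-compute-0f3a2
# i.e. cluster id, k8s type, sub host type, random suffix.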
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
deleted file mode 100644
index be956fca5..000000000
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: evaluate_groups.yml
-
-- name: Subscribe hosts, update repos and update OS packages
- hosts: oo_hosts_to_update
- roles:
- # Explicitly calling openshift_facts because it appears that when
- # rhel_subscribe is skipped that the openshift_facts dependency for
- # openshift_repos is also skipped (this is the case at least for Ansible
- # 2.0.2)
- - openshift_facts
- - role: rhel_subscribe
- when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
- - openshift_repos
- - os_update_latest
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
deleted file mode 100644
index 9f7961614..000000000
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
-# restart services. We will unconditionally restart all containerized services
-# because we have to unconditionally restart Docker:
-- set_fact:
- skip_node_svc_handlers: True
-
-- name: Update systemd units
- include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
-
-# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
-# play when the node has already been marked schedulable again. (this would look strange
-# in logs otherwise)
-- meta: flush_handlers
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 07db071ce..98953f72e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -4,7 +4,6 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- include: ../initialize_nodes_to_upgrade.yml
@@ -52,11 +51,15 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+ register: l_docker_upgrade_drain_result
+ until: not l_docker_upgrade_drain_result | failed
+ retries: 60
+ delay: 60
- - include: upgrade.yml
+ - include: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
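# Editorial note, not part of this diff: with retries: 60 and delay: 60 the
# drain above is retried for up to roughly an hour before the Docker upgrade
# gives up on that node.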
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 1b418920f..83f16ac0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -1,6 +1,10 @@
---
- name: Restart docker
service: name=docker state=restarted
+ register: l_docker_restart_docker_in_upgrade_result
+ until: not l_docker_restart_docker_in_upgrade_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
@@ -11,7 +15,6 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master"
- "{{ openshift.common.service_type }}-master-api"
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
@@ -24,4 +27,5 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 17f8fc6e9..808cc562c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,7 +4,6 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master"
- "{{ openshift.common.service_type }}-master-api"
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
@@ -32,7 +31,13 @@
- debug: var=docker_image_count.stdout
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-- service: name=docker state=stopped
+- service:
+ name: docker
+ state: stopped
+ register: l_pb_docker_upgrade_stop_result
+ until: not l_pb_docker_upgrade_stop_result | failed
+ retries: 3
+ delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index b2a2eac9a..52345a9ba 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -18,12 +18,16 @@
- name: Get current version of Docker
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Get latest available version of Docker
command: >
{{ repoquery_cmd }} --qf '%{version}' "docker"
register: avail_docker_version
+ retries: 4
+ until: avail_docker_version | succeeded
# Don't expect docker rpm to be available on hosts that don't already have it installed:
when: pkg_check.rc == 0
failed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 616ba04f8..d086cad00 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -2,13 +2,16 @@
- name: Backup etcd
hosts: oo_etcd_hosts_to_backup
roles:
- - role: openshift_facts
- - role: etcd_common
- r_etcd_common_action: backup
- r_etcd_common_backup_tag: etcd_backup_tag
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ - role: openshift_etcd_facts
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
hosts: localhost
@@ -20,7 +23,7 @@
| oo_select_keys(groups.oo_etcd_hosts_to_backup)
| oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
- etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) | list }}"
- fail:
msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when: etcd_backup_failed | length > 0
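# Editorial note, not part of this diff: the added `| list` forces the
# difference() result to render as a plain list (under newer Jinja2 it can
# come back as a lazy generator), keeping the join(',') in the failure
# message readable. With backup hosts etcd1/etcd2 where only etcd1 reported
# r_etcd_common_backup_complete, the facts evaluate roughly as:
#   etcd_backup_completed -> ['etcd1']
#   etcd_backup_failed    -> ['etcd2']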
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index 64abc54e7..5b8ba3bb2 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -15,10 +15,15 @@
hosts: oo_etcd_hosts_to_upgrade
tasks:
- include_role:
- name: etcd_common
- vars:
- r_etcd_common_action: drop_etcdctl
+ name: etcd
+ tasks_from: drop_etcdctl
- name: Perform etcd upgrade
include: ./upgrade.yml
when: openshift_etcd_upgrade | default(true) | bool
+
+- name: Backup etcd
+ include: backup.yml
+ vars:
+ etcd_backup_tag: "post-3.0-"
+ when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 0431c1ce0..d71c96cd7 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -36,7 +36,7 @@
- not openshift.common.is_etcd_system_container | bool
- name: Record containerized etcd version (runc)
- command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
register: etcd_container_version_runc
failed_when: false
# AUDIT:changed_when: `false` because we are only inspecting
@@ -98,13 +98,11 @@
serial: 1
tasks:
- include_role:
- name: etcd_upgrade
+ name: etcd
+ tasks_from: upgrade_image
+ vars:
+ r_etcd_common_etcd_runtime: "host"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- ansible_distribution == 'Fedora'
- not openshift.common.is_containerized | bool
-
-- name: Backup etcd
- include: backup.yml
- vars:
- etcd_backup_tag: "post-3.0-"
- when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
index 831ca8f57..e5e895775 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
@@ -5,13 +5,14 @@
- name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
- roles:
- - role: etcd_upgrade
- r_etcd_upgrade_action: upgrade
- r_etcd_upgrade_mechanism: image
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- etcd_peer: "{{ openshift.common.hostname }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: upgrade_image
+ vars:
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
- openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
index 2e79451e0..a2a26bad4 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
@@ -5,13 +5,14 @@
- name: Upgrade to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
- roles:
- - role: etcd_upgrade
- r_etcd_upgrade_action: upgrade
- r_etcd_upgrade_mechanism: rpm
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "host"
- etcd_peer: "{{ openshift.common.hostname }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: upgrade_rpm
+ vars:
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "host"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
- ansible_distribution == 'RedHat'
diff --git a/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
new file mode 100644
index 000000000..9c9c260fb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
@@ -0,0 +1,37 @@
+---
+apiVersion: v1
+kind: Role
+metadata:
+ name: shared-resource-viewer
+ namespace: openshift
+rules:
+- apiGroups:
+ - ""
+ - template.openshift.io
+ attributeRestrictions: null
+ resources:
+ - templates
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreamimages
+ - imagestreams
+ - imagestreamtags
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreams/layers
+ verbs:
+ - get
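# A minimal sketch, not part of this diff: the control-plane upgrade play
# later in this series copies this file to the master and applies it via
# oc_obj; done by hand the rough equivalent is:
- name: Replace the shared-resource-viewer role (illustration only)
  command: oc replace -f shared_resource_viewer_role.yaml -n openshift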
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 0f421928b..2826951e6 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -4,9 +4,6 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
-
-- include: ../initialize_oo_option_facts.yml
- include: ../initialize_facts.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 72de63070..fc1cbf32a 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -30,6 +30,7 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: " {{ groups['oo_nodes_to_config'] }}"
when:
+ - hostvars[item].openshift is defined
- hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker
deleted file mode 120000
index 6aeca2842..000000000
--- a/playbooks/common/openshift-cluster/upgrades/master_docker
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles/openshift_master/templates/master_docker \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index d9ddf3860..122066955 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -90,10 +90,12 @@
# openshift_examples from failing when trying to replace templates that do
# not already exist. We could have potentially done a replace --force to
# create and update in one step.
- - openshift_examples
+ - role: openshift_examples
+ when: openshift_install_examples | default(true,true) | bool
- openshift_hosted_templates
# Update the existing templates
- role: openshift_examples
+ when: openshift_install_examples | default(true,true) | bool
registry_url: "{{ openshift.master.registry_url }}"
openshift_examples_import_command: replace
- role: openshift_hosted_templates
@@ -101,9 +103,16 @@
openshift_hosted_templates_import_command: replace
# Check for warnings to be printed at the end of the upgrade:
-- name: Check for warnings
+- name: Clean up and display warnings
hosts: oo_masters_to_config
- tasks:
+ tags:
+ - always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
@@ -119,12 +128,8 @@
- not grep_plugin_order_override | skipped
- grep_plugin_order_override.rc == 0
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ - name: Warn if shared-resource-viewer could not be updated
+ debug:
+      msg: "WARNING: the shared-resource-viewer role could not be upgraded to 3.6 spec because it is marked protected; please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213"
+ when:
+ - __shared_resource_viewer_protected | default(false)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 9d8b73cff..6d8503879 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
@@ -1,8 +1,10 @@
---
# Only check if docker upgrade is required if docker_upgrade is not
# already set to False.
-- include: ../docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+- include: ../../docker/upgrade_check.yml
+ when:
+ - docker_upgrade is not defined or (docker_upgrade | bool)
+ - not (openshift.common.is_atomic | bool)
# Additional checks for Atomic hosts:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 06eb5f936..6a5bc24f7 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -9,23 +9,29 @@
local_facts:
ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- - name: Ensure Master is running
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
+ - when: openshift.common.is_containerized | bool
+ block:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type }}-master"
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+ # In case of the non-ha to ha upgrade.
+ - name: Check if the {{ openshift.common.service_type }}-master-api.service exists
+ command: >
+ systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend
+ register: master_api_service_status
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ when:
+ - master_api_service_status.stdout_lines | length > 0
+ - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items: "{{ master_services }}"
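# Editorial note, not part of this diff: `systemctl list-units NAME --no-legend`
# prints one line per matching unit, e.g.
#   origin-master-api.service  loaded active running  Origin Master API
# so the stdout_lines[0] check above is what distinguishes an HA layout
# (separate api/controllers units) from a single combined master unit.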
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
new file mode 100644
index 000000000..f75ae3b15
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
@@ -0,0 +1,22 @@
+---
+- name: Verify all masters have the etcd3 storage backend set
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - lib_utils
+ tasks:
+ - name: Read master storage backend setting
+ yedit:
+ state: list
+ src: /etc/origin/master/master-config.yaml
+ key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ register: _storage_backend
+
+ - fail:
+ msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+ when:
+      # assuming the master-config.yaml is properly configured, i.e. the value is a list
+ - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+ - debug:
+ msg: "Storage backend is set to etcd3"
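# A minimal sketch, not part of this diff: the master-config.yaml fragment
# this check expects; yedit lists
# kubernetesMasterConfig.apiServerArguments.storage-backend and the first
# element must be etcd3:
kubernetesMasterConfig:
  apiServerArguments:
    storage-backend:
    - etcd3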
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
new file mode 100644
index 000000000..2a8de50a2
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -0,0 +1,16 @@
+---
+- name: OpenShift Health Checks
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: upgrade
+ post_tasks:
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - docker_image_availability
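# Editorial note, not part of this diff: individual checks can be skipped
# with the health checker's openshift_disable_check variable (assumed per the
# role's usual usage), e.g.:
#   ansible-playbook ... -e openshift_disable_check=disk_availability,memory_availability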
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
index 9a959a959..3c0017891 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
@@ -5,9 +5,9 @@
tasks:
- fail:
msg: >
- This upgrade is only supported for origin, openshift-enterprise, and online
+ This upgrade is only supported for origin and openshift-enterprise
deployment types
- when: deployment_type not in ['origin','openshift-enterprise', 'online']
+ when: deployment_type not in ['origin','openshift-enterprise']
# Error out in situations where the user has older versions specified in their
# inventory in any of the openshift_release, openshift_image_tag, and
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
deleted file mode 100644
index 354af3cde..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Verify node processes
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- - openshift_docker_facts
- tasks:
- - name: Ensure Node is running
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 9b4a8e413..13fa37b09 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -4,6 +4,12 @@
msg: Verify OpenShift is already installed
when: openshift.common.version is not defined
+- name: Update oreg_auth docker login credentials if necessary
+ include_role:
+ name: docker
+ tasks_from: registry_auth.yml
+ when: oreg_auth_user is defined
+
- name: Verify containers are available for upgrade
command: >
docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
@@ -27,13 +33,17 @@
- name: Set fact avail_openshift_version
set_fact:
- avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}"
+ avail_openshift_version: "{{ repoquery_out.results.versions.available_versions_full.0 }}"
+ - name: Set openshift_pkg_version when not specified
+ set_fact:
+ openshift_pkg_version: "-{{ repoquery_out.results.versions.available_versions_full.0 }}"
+ when: openshift_pkg_version | default('') == ''
- name: Verify OpenShift RPMs are available for upgrade
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<')
+ - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
fail:
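# Editorial note, not part of this diff: openshift_pkg_version carries a
# leading dash (e.g. "-3.6.173.0.5") so it can be appended directly to
# package names, as rpm_upgrade.yml below does; split('-')[1] therefore
# yields the bare version ("3.6.173.0.5") that version_compare() receives.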
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index 164baca81..8cc46ab68 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -8,7 +8,6 @@
# TODO: If the sdn package isn't already installed this will install it, we
# should fix that
-
- name: Upgrade master packages
package: name={{ master_pkgs | join(',') }} state=present
vars:
@@ -16,7 +15,7 @@
- "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- PyYAML
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 6738ce11f..a5e2f7940 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -5,13 +5,19 @@
# oc adm migrate storage should be run prior to etcd v3 upgrade
# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
-- name: Pre master upgrade - Upgrade job storage
+- name: Pre master upgrade - Upgrade all storage
hosts: oo_first_master
tasks:
- - name: Upgrade job storage
+ - name: Upgrade all storage
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
+ migrate storage --include=* --confirm
+ register: l_pb_upgrade_control_plane_pre_upgrade_storage
+ when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
+ - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
@@ -25,7 +31,6 @@
role: master
local_facts:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
- name: Upgrade and backup etcd
include: ./etcd/main.yml
@@ -85,7 +90,10 @@
- include_vars: ../../../../roles/openshift_master/vars/main.yml
- - name: Update systemd units
+ - name: Update journald config
+ include: ../../../../roles/openshift_master/tasks/journald.yml
+
+ - name: Remove any legacy systemd units and update systemd units
include: ../../../../roles/openshift_master/tasks/systemd_units.yml
- name: Check for ca-bundle.crt
@@ -140,16 +148,21 @@
- include: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- - set_fact:
- master_update_complete: True
-
-- name: Post master upgrade - Upgrade job storage
- hosts: oo_first_master
- tasks:
- - name: Upgrade job storage
+ - name: Post master upgrade - Upgrade clusterpolicies storage
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
+ migrate storage --include=clusterpolicies --confirm
+ register: l_pb_upgrade_control_plane_post_upgrade_storage
+ when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+ - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
+ run_once: true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+ - set_fact:
+ master_update_complete: True
##############################################################################
# Gate on master update complete
@@ -164,7 +177,7 @@
| oo_select_keys(groups.oo_masters_to_config)
| oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
- set_fact:
- master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) | list }}"
- fail:
msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
when: master_update_failed | length > 0
@@ -178,18 +191,18 @@
roles:
- { role: openshift_cli }
vars:
- origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
- ent_reconcile_bindings: true
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
# Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
# restart.
skip_docker_role: True
+ __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
+ when: openshift_version | version_compare('3.7','<')
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -204,7 +217,7 @@
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ when: openshift_version | version_compare('3.7','<')
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -219,17 +232,74 @@
changed_when:
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
- when: openshift.common.version_gte_3_4_or_1_4 | bool
+ when:
+ - openshift_version | version_compare('3.7','<')
+ - openshift_version | version_compare('3.4','>=')
+
+ - when: openshift_upgrade_target | version_compare('3.7','<')
+ block:
+ - name: Retrieve shared-resource-viewer
+ oc_obj:
+ state: list
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ register: objout
+
+ - name: Determine if shared-resource-viewer is protected
+ set_fact:
+ __shared_resource_viewer_protected: true
+ when:
+ - "'results' in objout"
+ - "'results' in objout['results']"
+ - "'annotations' in objout['results']['results'][0]['metadata']"
+ - "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']"
+ - "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'"
+ - copy:
+ src: "{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - "{{ __master_shared_resource_viewer_file }}"
+ when: __shared_resource_viewer_protected is not defined
+
+ - name: Fixup shared-resource-viewer role
+ oc_obj:
+ state: present
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ files:
+ - "/tmp/{{ __master_shared_resource_viewer_file }}"
+ delete_after: true
+ when: __shared_resource_viewer_protected is not defined
+ register: result
+ retries: 3
+ delay: 5
+ until: result.rc == 0
+ ignore_errors: true
+
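The chain of `in` checks above walks the oc_obj result defensively before comparing the annotation value, so a missing key simply skips the fact rather than raising an undefined-variable error. The object being probed would look roughly like this (illustrative fragment; only the annotation is significant here):

apiVersion: v1
kind: Role
metadata:
  name: shared-resource-viewer
  namespace: openshift
  annotations:
    openshift.io/reconcile-protect: "true"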
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true -o name
+ {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
register: reconcile_scc_result
changed_when:
- reconcile_scc_result.stdout != ''
- reconcile_scc_result.rc == 0
run_once: true
+ - name: Migrate storage post policy reconciliation
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ run_once: true
+ register: l_pb_upgrade_control_plane_post_upgrade_storage
+ when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+ - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
+
- set_fact:
reconcile_complete: True
@@ -246,7 +316,7 @@
| oo_select_keys(groups.oo_masters_to_config)
| oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
- set_fact:
- reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) | list }}"
- fail:
msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
when: reconcile_failed | length > 0
@@ -258,7 +328,7 @@
roles:
- openshift_facts
tasks:
- - include: docker/upgrade.yml
+ - include: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
@@ -288,15 +358,19 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_control_plane_drain_result
+ until: not l_upgrade_control_plane_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
- openshift_facts
- docker
- - openshift_node_upgrade
- openshift_node_dnsmasq
+ - openshift_node_upgrade
post_tasks:
- name: Set node schedulability
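Both drain tasks in this file now register the result and retry for up to an hour (60 retries at 60-second delays) instead of aborting on the first pod-disruption hiccup. The retry idiom in isolation (node variable hypothetical; kubeconfig path assumed to match the config_base default used elsewhere in these playbooks):

- name: Drain a node, retrying for up to an hour (sketch)
  command: >
    oc adm drain {{ target_node }}
    --config=/etc/origin/master/admin.kubeconfig
    --force --delete-local-data --ignore-daemonsets
  delegate_to: "{{ groups.oo_first_master.0 }}"
  register: drain_result
  until: not drain_result | failed
  retries: 60
  delay: 60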
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 35a50cf4e..c93a5d89c 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,15 +26,19 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not l_upgrade_nodes_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
- openshift_facts
- docker
- - openshift_node_upgrade
- openshift_node_dnsmasq
+ - openshift_node_upgrade
- role: openshift_excluder
r_openshift_excluder_action: enable
r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
index 83d2cec81..8558bf3e9 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -74,18 +74,21 @@
- block:
- debug:
msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
- openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+ - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]
- set_fact:
openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
- openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+ - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]
- set_fact:
openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
- when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
- openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != default_predicates_no_region
+ - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
when: openshift_master_scheduler_predicates | default(none) is none
@@ -131,18 +134,21 @@
- block:
- debug:
msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
- openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+ - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]
- set_fact:
openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
- openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+ - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]
- set_fact:
openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
- when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
- openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != default_priorities_no_zone
+ - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]
when: openshift_master_scheduler_priorities | default(none) is none
@@ -162,5 +168,6 @@
content: "{{ scheduler_config | to_nice_json }}"
dest: "{{ openshift_master_scheduler_conf }}"
backup: true
- when: "{{ openshift_upgrade_scheduler_predicates is defined or
- openshift_upgrade_scheduler_priorities is defined }}"
+ when: >
+ openshift_upgrade_scheduler_predicates is defined or
+ openshift_upgrade_scheduler_priorities is defined
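Every hunk in this file makes the same mechanical change: `when:` values written as templated strings are rewritten as bare expressions or YAML lists, since recent Ansible warns that when statements should not include Jinja2 templating delimiters. The conversion in miniature (variable names hypothetical):

# before: templated string form, now deprecated
when: "{{ my_var is defined and my_flag | default(True) | bool }}"

# after: list form; items are implicitly ANDed
when:
  - my_var is defined
  - my_flag | default(True) | bool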
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index d69472fad..5e7a66171 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -41,12 +41,12 @@
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.certFile'
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
yaml_value: service-signer.key
- modify_yaml:
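The hunk above corrects what appears to be a typo'd key path (servicesServingCert vs. serviceServingCert), so the upgrade writes the signer settings under the key the master actually reads. The intended result in master-config.yaml is roughly this fragment (illustrative):

controllerConfig:
  serviceServingCert:
    signer:
      certFile: service-signer.crt
      keyFile: service-signer.key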
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
index f1245aa2e..a241ef039 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -39,8 +39,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -88,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index b693ab55c..54c85f0fb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -47,8 +47,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -92,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 4fd029107..cee4e9087 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -40,8 +40,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -89,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
index ed89dbe8d..52458e03c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -1,16 +1,10 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
index 965e39482..ae217ba2e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -39,8 +39,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -88,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 7830f462c..d7cb38d03 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -47,8 +47,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -92,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index 4364ff8e3..8531e6045 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -40,8 +40,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -89,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
index ed89dbe8d..52458e03c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
@@ -1,16 +1,10 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index 4e7c14e94..bda245fe1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -39,13 +39,18 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -70,10 +75,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
@@ -88,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
@@ -111,6 +112,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_5/master_config_upgrade.yml"
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 45b664d06..6cdea7b84 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -47,13 +47,18 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -74,10 +79,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
@@ -92,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index 036d3fcf5..e29d0f8e6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -40,8 +40,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -89,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
index ed89dbe8d..db0c8f886 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -1,16 +1,15 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 5b9ac9e8f..dd109cfa9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -39,13 +39,22 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -70,10 +79,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
@@ -88,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
@@ -111,6 +116,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index a470c7595..8ab68002d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -47,13 +47,22 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -74,10 +83,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
@@ -92,7 +97,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 25eceaf90..ba6fcc3f8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -40,13 +40,18 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_node_excluders.yml
tags:
- pre_upgrade
@@ -89,7 +94,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
new file mode 120000
index 000000000..7de3c1dd7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
new file mode 100644
index 000000000..1d4d1919c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
@@ -0,0 +1,20 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.election.lockName'
+ yaml_value: 'openshift-master-controllers'
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
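For 3.7 the hook additionally pins the controllers' election lock name and the serving client CA. Applied to a default install, the four modify_yaml calls above would leave roughly this in master-config.yaml (illustrative fragment):

controllerConfig:
  election:
    lockName: openshift-master-controllers
  serviceServingCert:
    signer:
      certFile: service-signer.crt
      keyFile: service-signer.key
servingInfo:
  clientCA: ca.crt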
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/roles b/playbooks/common/openshift-cluster/upgrades/v3_7/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/roles
@@ -0,0 +1 @@
+../../../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
new file mode 100644
index 000000000..f4862e321
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -0,0 +1,142 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed; nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time and then restarted.
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
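The stop/start cycle above is deliberately two tasks rather than one `state: restarted`: under Ansible's default linear strategy the stop task completes on every master before any start task begins, guaranteeing a window in which no controller holds the old-style leader lock. A single restart task would let some controllers come back up while others were still stopping. The shape of the idiom (unit name hypothetical):

- name: Stop everywhere first (linear strategy finishes this on all hosts)
  systemd:
    name: my-controllers
    state: stopped
- name: Only then start anywhere
  systemd:
    name: my-controllers
    state: started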
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
new file mode 100644
index 000000000..d5a8379d7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -0,0 +1,144 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed; nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time and then restarted.
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
new file mode 100644
index 000000000..bc080f9a3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -0,0 +1,115 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed; nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
new file mode 100644
index 000000000..8e4f99c91
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -0,0 +1,23 @@
+---
+###############################################################################
+# Pre-upgrade checks for known data problems. If this playbook fails, you should
+# contact support. If you're not supported, contact users@lists.openshift.com.
+###############################################################################
+- name: Verify 3.7 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
+
+ - name: Confirm OpenShift authorization objects are in sync
+ command: >
+ {{ openshift.common.client_binary }} adm migrate authorization
+ when: openshift_version | version_compare('3.7','<')
+ changed_when: false
+ register: l_oc_result
+ until: l_oc_result.rc == 0
+ retries: 4
+ delay: 15
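The until/retries loop gives the control plane time to finish syncing authorization objects after the controller restart performed earlier in the upgrade; a zero exit code from the migrate command is taken as "in sync". An operator could run the equivalent check by hand as a task (sketch; the kubeconfig flag is an assumption, mirroring the other adm invocations in these playbooks):

- name: Re-run the authorization sync check manually (sketch)
  command: >
    oc adm migrate authorization
    --config=/etc/origin/master/admin.kubeconfig
  changed_when: false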
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index 33fc5630f..be2e6a15a 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -1,17 +1,22 @@
---
-- name: Gather and set facts for node hosts
+- name: Validate node hostnames
hosts: oo_nodes_to_config
- roles:
- - openshift_facts
tasks:
- - shell:
+ - name: Query DNS for IP address of {{ openshift.common.hostname }}
+ shell:
getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
register: lookupip
changed_when: false
failed_when: false
- name: Warn user about bad openshift_hostname values
pause:
- prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+ prompt:
+ The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
+ doesn't resolve to an IP address owned by this host. Please set the
+ openshift_hostname variable to a hostname that, when resolved on the host
+ in question, resolves to an IP address matching an interface on this
+ host. This host will fail liveness checks for pods utilizing hostPorts.
+ Press ENTER to continue or CTRL-C to abort.
seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
when:
- lookupip.stdout != '127.0.0.1'
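Note the `seconds:` line: when openshift_override_hostname_check is true the pause auto-continues after 10 seconds, and otherwise `omit` drops the parameter entirely so the prompt waits indefinitely for ENTER. The ternary-with-omit idiom in isolation (variable name hypothetical):

- name: Prompt, but auto-continue when overridden (sketch)
  pause:
    prompt: "Press ENTER to continue"
    seconds: "{{ 10 if auto_continue | default(false) | bool else omit }}"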
diff --git a/playbooks/common/openshift-etcd/ca.yml b/playbooks/common/openshift-etcd/ca.yml
new file mode 100644
index 000000000..ac5543be9
--- /dev/null
+++ b/playbooks/common/openshift-etcd/ca.yml
@@ -0,0 +1,15 @@
+---
+- name: Generate new etcd CA
+ hosts: oo_first_etcd
+ roles:
+ - role: openshift_etcd_facts
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: ca
+ vars:
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ when:
+ - etcd_ca_setup | default(True) | bool
diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml
new file mode 100644
index 000000000..eb6b94f33
--- /dev/null
+++ b/playbooks/common/openshift-etcd/certificates.yml
@@ -0,0 +1,4 @@
+---
+- include: server_certificates.yml
+
+- include: master_etcd_certificates.yml
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 2cb6197d1..48d46bbb0 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -1,11 +1,36 @@
---
+- name: etcd Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set etcd install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_etcd: "In Progress"
+ aggregate: false
+
+- include: ca.yml
+
+- include: certificates.yml
+
- name: Configure etcd
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
+ - role: os_firewall
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
+
+- name: etcd Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set etcd install 'Complete'
+ set_stats:
+ data:
+ installer_phase_etcd: "Complete"
+ aggregate: false
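The new checkpoint plays record installer progress as Ansible custom stats. With `aggregate: false` each call overwrites the stored value rather than merging into it, so a run that dies mid-phase leaves the phase reported as "In Progress". A minimal sketch (phase key hypothetical; displaying custom stats typically requires show_custom_stats to be enabled in ansible.cfg):

- name: Mark a phase complete (sketch)
  set_stats:
    data:
      installer_phase_example: "Complete"
    aggregate: false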
diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml
new file mode 100644
index 000000000..b16b78c4f
--- /dev/null
+++ b/playbooks/common/openshift-etcd/embedded2external.yml
@@ -0,0 +1,172 @@
+---
+- name: Pre-migrate checks
+ hosts: localhost
+ tasks:
+ # Check there is only one etcd host
+ - assert:
+ that: groups.oo_etcd_to_config | default([]) | length == 1
+ msg: "[etcd] group must contain only one host"
+ # Check there is only one master
+ - assert:
+ that: groups.oo_masters_to_config | default([]) | length == 1
+ msg: "[master] group must contain only one host"
+
+# 1. stop a master
+- name: Prepare masters for etcd data migration
+ hosts: oo_first_master
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Check the master API is ready
+ include_role:
+ name: openshift_master
+ tasks_from: check_master_api_is_ready
+ - set_fact:
+ master_service: "{{ openshift.common.service_type + '-master' }}"
+ embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ - debug:
+ msg: "master service name: {{ master_service }}"
+ - name: Stop master
+ service:
+ name: "{{ master_service }}"
+ state: stopped
+ # 2. backup embedded etcd
+ # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.archive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+# 3. deploy certificates (for etcd and master)
+- include: ca.yml
+
+- include: server_certificates.yml
+
+- name: Backup etcd client certificates for master host
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_master_etcd_certificates
+
+- name: Redeploy master etcd certificates
+ include: master_etcd_certificates.yml
+ vars:
+ etcd_certificates_redeploy: "{{ true }}"
+
+# 4. deploy external etcd
+- include: ../openshift-etcd/config.yml
+
+# 5. stop external etcd
+- name: Cleanse etcd
+ hosts: oo_etcd_to_config[0]
+ gather_facts: no
+ pre_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ - include_role:
+ name: etcd
+ tasks_from: clean_data
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+
+# 6. copy the embedded etcd backup to the external host
+# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory
+- name: Copy embedded etcd backup to the external host
+ hosts: localhost
+ tasks:
+ - name: Create local temp directory for syncing etcd backup
+ local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
+ register: g_etcd_client_mktemp
+ changed_when: False
+ become: no
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.fetch
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_first_master.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_first_master[0] }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.copy
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_etcd_to_config[0] }}"
+
+ - debug:
+ msg: "etcd_backup_dest_directory: {{ g_etcd_client_mktemp.stdout }}"
+
+ - name: Delete temporary directory
+ local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
+ changed_when: False
+ become: no
+
+# 7. force new cluster from the backup
+- name: Force new etcd cluster
+ hosts: oo_etcd_to_config[0]
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup.unarchive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.force_new_cluster
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+
+# 8. re-configure master to use the external etcd
+- name: Configure master to use external etcd
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: configure_external_etcd
+ vars:
+ etcd_peer_url_scheme: "https"
+ etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}"
+ etcd_peer_port: 2379
+
+ # 9. start the master
+ - name: Start master
+ service:
+ name: "{{ master_service }}"
+ state: started
+ register: service_status
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
diff --git a/playbooks/common/openshift-etcd/master_etcd_certificates.yml b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
new file mode 100644
index 000000000..0a25aac57
--- /dev/null
+++ b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
@@ -0,0 +1,14 @@
+---
+- name: Create etcd client certificates for master hosts
+ hosts: oo_masters_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
new file mode 100644
index 000000000..31362f2f6
--- /dev/null
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -0,0 +1,169 @@
+---
+- name: Check if the master has embedded etcd
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - fail:
+ msg: "Migration of an embedded etcd is not supported. Please, migrate the embedded etcd into an external etcd first."
+ when:
+ - groups.oo_etcd_to_config | default([]) | length == 0
+
+- name: Run pre-checks
+ hosts: oo_etcd_to_migrate
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.pre_check
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+# TODO: This will be different for release-3.6 branch
+- name: Prepare masters for etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master-controllers' }}"
+ - "{{ openshift.common.service_type + '-master-api' }}"
+ - debug:
+ msg: "master service name: {{ master_services }}"
+ - name: Stop masters
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items: "{{ master_services }}"
+
+- name: Backup v2 data
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migration
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
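+  # Collect the hosts that reported a completed backup; the fail task below
+  # aborts the migration if any host in oo_etcd_to_migrate is missing.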
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) | list }}"
+ - fail:
+ msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when:
+ - etcd_backup_failed | length > 0
+
+- name: Stop etcd
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ pre_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+
+- name: Migrate data on first etcd
+ hosts: oo_etcd_to_migrate[0]
+ gather_facts: no
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+
+- name: Clean data stores on remaining etcd hosts
+ hosts: oo_etcd_to_migrate[1:]
+ gather_facts: no
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: clean_data
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+ - name: Add etcd hosts
+ delegate_to: localhost
+ add_host:
+ name: "{{ item }}"
+ groups: oo_new_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_etcd_to_migrate[1:] | default([]) }}"
+ changed_when: no
+ - name: Set success
+ set_fact:
+ r_etcd_migrate_success: true
+
+- include: ./scaleup.yml
+
+- name: Gate on etcd migration
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ etcd_migration_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
+ - set_fact:
+ etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) | list }}"
+
+- name: Add TTLs on the first master
+ hosts: oo_first_master[0]
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.add_ttls
+ vars:
+ etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+ when: etcd_migration_failed | length == 0
+
+- name: Configure masters if etcd data migration is successful
+ hosts: oo_masters_to_config
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.configure_master
+ when: etcd_migration_failed | length == 0
+ - debug:
+ msg: "Skipping master re-configuration since migration failed."
+ when:
+ - etcd_migration_failed | length > 0
+ - name: Start master services
+ service:
+ name: "{{ item }}"
+ state: started
+ register: service_status
+      # The master-api or master-controllers service sometimes fails to start on the first attempt
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
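+      # Start in the reverse of the stop order: master-api first, then master-controllers.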
+ with_items: "{{ master_services[::-1] }}"
+ - fail:
+ msg: "Migration failed. The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}"
+ when:
+ - etcd_migration_failed | length > 0
diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml
index af1ef245a..5eaea5ae8 100644
--- a/playbooks/common/openshift-etcd/restart.yml
+++ b/playbooks/common/openshift-etcd/restart.yml
@@ -7,3 +7,21 @@
service:
name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
state: restarted
+ when:
+ - not g_etcd_certificates_expired | default(false) | bool
+
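+# When the certificates have expired the plain restart above is skipped;
+# instead etcd is stopped on all members and then started again.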
+- name: Restart etcd when certificates have expired
+ hosts: oo_etcd_to_config
+ tasks:
+ - name: stop etcd
+ service:
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
+ state: stopped
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
+ - name: start etcd
+ service:
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
+ state: started
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
new file mode 100644
index 000000000..20061366c
--- /dev/null
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -0,0 +1,83 @@
+---
+- name: Gather facts
+ hosts: oo_etcd_to_config:oo_new_etcd_to_config
+ roles:
+ - openshift_etcd_facts
+ post_tasks:
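+  # Pin the role-computed etcd_hostname and etcd_ip as host facts so that
+  # later plays can read them through hostvars.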
+ - set_fact:
+ etcd_hostname: "{{ etcd_hostname }}"
+ etcd_ip: "{{ etcd_ip }}"
+
+- name: Configure etcd
+ hosts: oo_new_etcd_to_config
+ serial: 1
+ any_errors_fatal: true
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ pre_tasks:
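+  # 'member add' only fails when etcdctl exits 1 without "peerURL exists" in
+  # stderr, so re-running the play against an already-added member is safe.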
+ - name: Add new etcd members to cluster
+ command: >
+ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
+ --key-file {{ etcd_peer_key_file }}
+ --ca-file {{ etcd_peer_ca_file }}
+ -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_ip }}:{{ etcd_client_port }}
+ member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
+ delegate_to: "{{ etcd_ca_host }}"
+ failed_when:
+ - etcd_add_check.rc == 1
+ - ("peerURL exists" not in etcd_add_check.stderr)
+ register: etcd_add_check
+ retries: 3
+ delay: 10
+ until: etcd_add_check.rc == 0
+ - include_role:
+ name: etcd
+ tasks_from: server_certificates
+ vars:
+ etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ roles:
+ - role: os_firewall
+ when: etcd_add_check.rc == 0
+ - role: openshift_etcd
+ when: etcd_add_check.rc == 0
+ etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_initial_cluster_state: "existing"
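+    # 'member add' prints the new ETCD_INITIAL_CLUSTER value on the fourth
+    # line of its output; strip the variable name and the quotes.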
+ etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
+ etcd_ca_setup: False
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ - role: nickhammond.logrotate
+ when: etcd_add_check.rc == 0
+ post_tasks:
+ - name: Verify cluster is stable
+ command: >
+ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
+ --key-file {{ etcd_peer_key_file }}
+ --ca-file {{ etcd_peer_ca_file }}
+ -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
+ cluster-health
+ register: scaleup_health
+ retries: 3
+ delay: 30
+ until: scaleup_health.rc == 0
+
+- name: Update master etcd client urls
+ hosts: oo_masters_to_config
+ serial: 1
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
+ openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
+ roles:
+ - role: openshift_master_facts
+ post_tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: update_etcd_client_urls
diff --git a/playbooks/common/openshift-etcd/server_certificates.yml b/playbooks/common/openshift-etcd/server_certificates.yml
new file mode 100644
index 000000000..10e06747b
--- /dev/null
+++ b/playbooks/common/openshift-etcd/server_certificates.yml
@@ -0,0 +1,15 @@
+---
+- name: Create etcd server certificates for etcd hosts
+ hosts: oo_etcd_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: server_certificates
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
deleted file mode 100644
index ced4bddc5..000000000
--- a/playbooks/common/openshift-etcd/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_etcd
- add_host:
- name: "{{ item }}"
- groups: g_service_etcd
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change etcd state on etcd instance(s)
- hosts: g_service_etcd
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=etcd state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 1efdfb336..c2ae5f313 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -1,23 +1,56 @@
---
-- name: Open firewall ports for GlusterFS
- hosts: oo_glusterfs_to_config
- vars:
- os_firewall_allow:
- - service: glusterfs_sshd
- port: "2222/tcp"
- - service: glusterfs_daemon
- port: "24007/tcp"
- - service: glusterfs_management
- port: "24008/tcp"
- - service: glusterfs_bricks
- port: "49152-49251/tcp"
- roles:
- - role: os_firewall
+- name: GlusterFS Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
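+  # set_stats feeds the installer checkpoint callback, which reports the
+  # status of each installer phase at the end of the run.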
+ - name: Set GlusterFS install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_glusterfs: "In Progress"
+ aggregate: false
+
+- name: Open firewall ports for GlusterFS nodes
+ hosts: glusterfs
+ tasks:
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: firewall.yml
+ when:
+ - openshift_storage_glusterfs_is_native | default(True) | bool
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+ when:
+ - openshift_storage_glusterfs_is_native | default(True) | bool
+
+- name: Open firewall ports for GlusterFS registry nodes
+ hosts: glusterfs_registry
+ tasks:
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: firewall.yml
+ when:
+ - openshift_storage_glusterfs_registry_is_native | default(True) | bool
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
when:
- - openshift_storage_glusterfs_is_native | default(True)
+ - openshift_storage_glusterfs_registry_is_native | default(True) | bool
- name: Configure GlusterFS
hosts: oo_first_master
- roles:
- - role: openshift_storage_glusterfs
+ tasks:
+ - name: setup glusterfs
+ include_role:
+ name: openshift_storage_glusterfs
when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- name: GlusterFS Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set GlusterFS install 'Complete'
+ set_stats:
+ data:
+ installer_phase_glusterfs: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index c414913bf..2a703cb61 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -1,4 +1,23 @@
---
+- name: Load Balancer Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set load balancer install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_loadbalancer: "In Progress"
+ aggregate: false
+
+- name: Configure firewall and docker for load balancers
+ hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
+ vars:
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_docker
+ when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
+
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
@@ -12,5 +31,17 @@
openshift_use_nuage | default(false),
nuage_mon_rest_server_port | default(none)))
+ openshift_loadbalancer_additional_backends | default([]) }}"
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
- role: openshift_loadbalancer
+ - role: tuned
+
+- name: Load Balancer Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set load balancer install 'Complete'
+ set_stats:
+ data:
+ installer_phase_loadbalancer: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
deleted file mode 100644
index d3762c961..000000000
--- a/playbooks/common/openshift-loadbalancer/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_lb
- add_host:
- name: "{{ item }}"
- groups: g_service_lb
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on lb instance(s)
- hosts: g_service_lb
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..facb3a5b9
--- /dev/null
+++ b/playbooks/common/openshift-management/add_container_provider.yml
@@ -0,0 +1,8 @@
+---
+- name: Add Container Provider to Management
+ hosts: oo_first_master
+ tasks:
+ - name: Run the Management Integration Tasks
+ include_role:
+ name: openshift_management
+ tasks_from: add_container_provider
diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml
new file mode 100644
index 000000000..908679e81
--- /dev/null
+++ b/playbooks/common/openshift-management/config.yml
@@ -0,0 +1,35 @@
+---
+- name: Management Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_management: "In Progress"
+ aggregate: false
+
+- name: Setup CFME
+ hosts: oo_first_master
+ pre_tasks:
+ - name: Create a temporary place to evaluate the PV templates
+ command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: r_openshift_management_mktemp
+ changed_when: false
+
+ tasks:
+ - name: Run the CFME Setup Role
+ include_role:
+ name: openshift_management
+ vars:
+        template_dir: "{{ hostvars[groups.oo_first_master.0].r_openshift_management_mktemp.stdout }}"
+
+- name: Management Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'Complete'
+ set_stats:
+ data:
+ installer_phase_management: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-management/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/common/openshift-cfme/filter_plugins
+++ b/playbooks/common/openshift-management/filter_plugins
diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-management/library
index ba40d2f56..ba40d2f56 120000
--- a/playbooks/common/openshift-cfme/library
+++ b/playbooks/common/openshift-management/library
diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-management/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/common/openshift-cfme/roles
+++ b/playbooks/common/openshift-management/roles
diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml
index 78b8e7668..9f35cc276 100644
--- a/playbooks/common/openshift-cfme/uninstall.yml
+++ b/playbooks/common/openshift-management/uninstall.yml
@@ -1,8 +1,8 @@
---
- name: Uninstall CFME
- hosts: masters
+ hosts: masters[0]
tasks:
- name: Run the CFME Uninstall Role Tasks
include_role:
- name: openshift_cfme
+ name: openshift_management
tasks_from: uninstall
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
new file mode 100644
index 000000000..350557f19
--- /dev/null
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -0,0 +1,46 @@
+---
+- name: Master Additional Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Master Additional install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_master_additional: "In Progress"
+ aggregate: false
+
+- name: Additional master configuration
+ hosts: oo_first_master
+ vars:
+ cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+ etcd_urls: "{{ openshift.master.etcd_urls }}"
+ openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
+ omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
+ roles:
+ - role: openshift_master_cluster
+ when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
+ - role: openshift_examples
+ when: openshift_install_examples | default(true, true) | bool
+ registry_url: "{{ openshift.master.registry_url }}"
+ - role: openshift_hosted_templates
+ registry_url: "{{ openshift.master.registry_url }}"
+ - role: openshift_manageiq
+ when: openshift_use_manageiq | default(true) | bool
+ - role: cockpit
+ when:
+ - not openshift.common.is_atomic | bool
+ - deployment_type == 'openshift-enterprise'
+ - osm_use_cockpit is undefined or osm_use_cockpit | bool
+ - openshift.common.deployment_subtype != 'registry'
+ - role: flannel_register
+ when: openshift_use_flannel | default(false) | bool
+
+- name: Master Additional Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Master Additional install 'Complete'
+ set_stats:
+ data:
+ installer_phase_master_additional: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-master/certificates.yml b/playbooks/common/openshift-master/certificates.yml
new file mode 100644
index 000000000..f6afbc36f
--- /dev/null
+++ b/playbooks/common/openshift-master/certificates.yml
@@ -0,0 +1,14 @@
+---
+- name: Create OpenShift certificates for master hosts
+ hosts: oo_masters_to_config
+ vars:
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ roles:
+ - role: openshift_master_facts
+ - role: openshift_named_certificates
+ - role: openshift_ca
+ - role: openshift_master_certificates
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 70108fb7a..b359919ba 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,10 +1,40 @@
---
-- name: Gather and set facts for master hosts
+- name: Master Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Master install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_master: "In Progress"
+ aggregate: false
+
+- include: certificates.yml
+
+- name: Disable excluders
hosts: oo_masters_to_config
- vars:
- t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}"
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+- name: Gather and set facts for master hosts
+ hosts: oo_masters_to_config
pre_tasks:
+ # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336
+ #
+ # When scaling up a cluster upgraded from OCP <= 3.5, ensure that
+ # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing
+ # masters, or absent if such is the case.
+ - name: Detect if this host is a new master in a scale up
+ set_fact:
+ g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}"
+
+ - name: Scaleup Detection
+ debug:
+ var: g_openshift_master_is_scaleup
+
- name: Check for RPM generated config marker file .config_managed
stat:
path: /etc/origin/.config_managed
@@ -14,35 +44,14 @@
file:
path: "/etc/origin/{{ item }}"
state: absent
- when: rpmgenerated_config.stat.exists == true and deployment_type in ['openshift-enterprise', 'atomic-enterprise']
+ when:
+    - rpmgenerated_config.stat.exists | bool
+ - deployment_type == 'openshift-enterprise'
with_items:
- master
- node
- .config_managed
- - name: Check for existing configuration
- stat:
- path: /etc/origin/master/master-config.yaml
- register: master_config_stat
-
- - name: Set clean install fact
- set_fact:
- l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
-
- - name: Determine if etcd3 storage is in use
- command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
- register: etcd3_grep
- failed_when: false
- changed_when: false
-
- - name: Set etcd3 fact
- set_fact:
- l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
-
- - set_fact:
- openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}"
- when: openshift_master_pod_eviction_timeout is not defined
-
- set_fact:
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
openshift_master_etcd_hosts: "{{ hostvars
@@ -50,23 +59,6 @@
| default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
-
- - set_fact:
- openshift_master_debug_level: "{{ t_oo_option_master_debug_level }}"
- when: openshift_master_debug_level is not defined and t_oo_option_master_debug_level != ""
-
- - set_fact:
- openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
- when: openshift_master_default_subdomain is not defined
- - set_fact:
- openshift_hosted_metrics_deploy: "{{ lookup('oo_option', 'openshift_hosted_metrics_deploy') | default(false, true) }}"
- when: openshift_hosted_metrics_deploy is not defined
- - set_fact:
- openshift_hosted_metrics_duration: "{{ lookup('oo_option', 'openshift_hosted_metrics_duration') | default(7) }}"
- when: openshift_hosted_metrics_duration is not defined
- - set_fact:
- openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}"
- when: openshift_hosted_metrics_resolution is not defined
roles:
- openshift_facts
post_tasks:
@@ -88,7 +80,7 @@
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-- name: Determine if session secrets must be generated
+- name: Inspect state of first master config settings
hosts: oo_first_master
roles:
- role: openshift_facts
@@ -98,6 +90,60 @@
local_facts:
session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+ - name: Check for existing configuration
+ stat:
+ path: /etc/origin/master/master-config.yaml
+ register: master_config_stat
+
+ - name: Set clean install fact
+ set_fact:
+ l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
+
+ - name: Determine if etcd3 storage is in use
+ command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
+ register: etcd3_grep
+ failed_when: false
+ changed_when: false
+
+ - name: Set etcd3 fact
+ set_fact:
+      l_etcd3_enabled: "{{ (etcd3_grep.rc == 0) | bool }}"
+
+ - name: Check if atomic-openshift-master sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master
+ register: l_aom_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master
+ register: l_default_registry_defined
+ when: l_aom_exists.stat.exists | bool
+
+ - name: Check if atomic-openshift-master-api sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master-api
+ register: l_aom_api_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api
+ register: l_default_registry_defined_api
+ when: l_aom_api_exists.stat.exists | bool
+
+ - name: Check if atomic-openshift-master-controllers sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master-controllers
+ register: l_aom_controllers_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers
+ register: l_default_registry_defined_controllers
+ when: l_aom_controllers_exists.stat.exists | bool
+
+ - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value
+ set_fact:
+ l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}"
+ l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}"
+ l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}"
- name: Generate master session secrets
hosts: oo_first_master
@@ -123,32 +169,74 @@
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- roles:
- - role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
+ openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.ip') | default([]) | join(',')
+ }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_master_facts
+ - role: openshift_hosted_facts
+ - role: openshift_clock
+ - role: openshift_cloud_provider
+ - role: openshift_builddefaults
+ - role: openshift_buildoverrides
+ - role: nickhammond.logrotate
+ - role: contiv
+ contiv_role: netmaster
+ when: openshift_use_contiv | default(False) | bool
+ - role: openshift_master
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
+ openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}"
+ openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
+ openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
+ openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
+ - role: tuned
+ - role: nuage_ca
+ when: openshift_use_nuage | default(false) | bool
+ - role: nuage_common
+ when: openshift_use_nuage | default(false) | bool
- role: nuage_master
- when: openshift.common.use_nuage | bool
+ when: openshift_use_nuage | default(false) | bool
- role: calico_master
- when: openshift.common.use_calico | bool
+ when: openshift_use_calico | default(false) | bool
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: master
+ when: openshift_use_kuryr | default(false) | bool
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
+
+- name: Configure API Aggregation on masters
+ hosts: oo_masters
+ serial: 1
+ tasks:
+ - include: tasks/wire_aggregator.yml
+
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
+- name: Master Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Master install 'Complete'
+ set_stats:
+ data:
+ installer_phase_master: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..d0a9f11dc
--- /dev/null
+++ b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
@@ -0,0 +1,2 @@
+// empty file so that the master-config can still point to a file that exists
+// this file will be replaced by the template service broker role if enabled
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 6fec346c3..4d73b8124 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -7,7 +7,7 @@
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
handlers:
- - include: roles/openshift_master/handlers/main.yml
+ - include: ../../../roles/openshift_master/handlers/main.yml
static: yes
roles:
- openshift_facts
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index 67ba0aa2e..a5dbe0590 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -37,3 +37,4 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 508b5a3ac..4f8b758fd 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -1,9 +1,4 @@
---
-- name: Restart master
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: restarted
- when: not openshift_master_ha | bool
- name: Restart master API
service:
name: "{{ openshift.common.service_type }}-master-api"
@@ -15,6 +10,7 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: openshift_master_ha | bool
- name: Restart master controllers
service:
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index bc61ee9bb..f4dc9df8a 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -1,11 +1,4 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
- name: Update master count
hosts: oo_masters:!oo_masters_to_config
serial: 1
@@ -50,38 +43,14 @@
delay: 1
changed_when: false
-- name: Configure docker hosts
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
+- include: ../openshift-master/set_network_facts.yml
-- name: Disable excluders
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+- include: ../openshift-etcd/certificates.yml
- include: ../openshift-master/config.yml
- include: ../openshift-loadbalancer/config.yml
-- include: ../openshift-node/config.yml
+- include: ../openshift-node/certificates.yml
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+- include: ../openshift-node/config.yml
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
deleted file mode 100644
index 48a2731aa..000000000
--- a/playbooks/common/openshift-master/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- groups: g_service_masters
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on master instance(s)
- hosts: g_service_masters
- connection: ssh
- gather_facts: no
- tasks:
- - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml
new file mode 100644
index 000000000..9a6cf26fc
--- /dev/null
+++ b/playbooks/common/openshift-master/set_network_facts.yml
@@ -0,0 +1,34 @@
+---
+- name: Read first master's config
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - stat:
+ path: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_stat
+ - slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_slurp
+
+- name: Set network facts for masters
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ post_tasks:
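+  # Each fact falls back to the value from the first master's config file
+  # only when the inventory does not already define it.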
+ - block:
+ - set_fact:
+ osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}"
+ when: osm_cluster_network_cidr is not defined
+ - set_fact:
+ osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}"
+ when: osm_host_subnet_length is not defined
+ - set_fact:
+ openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}"
+ when: openshift_portal_net is not defined
+ - openshift_facts:
+ role: common
+ local_facts:
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ when:
+ - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool
diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
new file mode 100644
index 000000000..560eea785
--- /dev/null
+++ b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
@@ -0,0 +1,215 @@
+---
+- name: Make temp cert dir
+ command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
+ register: certtemp
+ changed_when: False
+
+- name: Check for First Master Aggregator Signer cert
+ stat:
+ path: /etc/origin/master/front-proxy-ca.crt
+ register: first_proxy_ca_crt
+ changed_when: false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- name: Check for First Master Aggregator Signer key
+ stat:
+    path: /etc/origin/master/front-proxy-ca.key
+ register: first_proxy_ca_key
+ changed_when: false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+# TODO: this currently has a bug where hostnames are required
+- name: Creating First Master Aggregator signer certs
+ command: >
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert
+ --cert=/etc/origin/master/front-proxy-ca.crt
+ --key=/etc/origin/master/front-proxy-ca.key
+ --serial=/etc/origin/master/ca.serial.txt
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when:
+ - not first_proxy_ca_crt.stat.exists
+ - not first_proxy_ca_key.stat.exists
+
+- name: Check for Aggregator Signer cert
+ stat:
+ path: /etc/origin/master/front-proxy-ca.crt
+ register: proxy_ca_crt
+ changed_when: false
+
+- name: Check for Aggregator Signer key
+ stat:
+    path: /etc/origin/master/front-proxy-ca.key
+ register: proxy_ca_key
+ changed_when: false
+
+- name: Copy Aggregator Signer certs from first master
+ fetch:
+ src: "/etc/origin/master/{{ item }}"
+ dest: "{{ certtemp.stdout }}/{{ item }}"
+ flat: yes
+ with_items:
+ - front-proxy-ca.crt
+ - front-proxy-ca.key
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when:
+ - not proxy_ca_key.stat.exists
+ - not proxy_ca_crt.stat.exists
+
+- name: Copy Aggregator Signer certs to host
+ copy:
+ src: "{{ certtemp.stdout }}/{{ item }}"
+ dest: "/etc/origin/master/{{ item }}"
+ with_items:
+ - front-proxy-ca.crt
+ - front-proxy-ca.key
+ when:
+ - not proxy_ca_key.stat.exists
+ - not proxy_ca_crt.stat.exists
+
+# oc_adm_ca_server_cert:
+# cert: /etc/origin/master/front-proxy-ca.crt
+# key: /etc/origin/master/front-proxy-ca.key
+
+- name: Check for first master api-client config
+ stat:
+ path: /etc/origin/master/aggregator-front-proxy.kubeconfig
+ register: first_front_proxy_kubeconfig
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ run_once: true
+
+# create-api-client-config generates a ca.crt file which will
+# overwrite the OpenShift CA certificate. Generate the aggregator
+# kubeconfig in a temporary directory and then copy files into the
+# master config dir to avoid overwriting ca.crt.
+- block:
+ - name: Create first master api-client config for Aggregator
+ command: >
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config
+ --certificate-authority=/etc/origin/master/front-proxy-ca.crt
+ --signer-cert=/etc/origin/master/front-proxy-ca.crt
+ --signer-key=/etc/origin/master/front-proxy-ca.key
+ --user aggregator-front-proxy
+ --client-dir={{ certtemp.stdout }}
+ --signer-serial=/etc/origin/master/ca.serial.txt
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ run_once: true
+ - name: Copy first master api-client config for Aggregator
+ copy:
+ src: "{{ certtemp.stdout }}/{{ item }}"
+ dest: "/etc/origin/master/"
+ remote_src: true
+ with_items:
+ - aggregator-front-proxy.crt
+ - aggregator-front-proxy.key
+ - aggregator-front-proxy.kubeconfig
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ run_once: true
+ when:
+ - not first_front_proxy_kubeconfig.stat.exists
+
+- name: Check for api-client config
+ stat:
+ path: /etc/origin/master/aggregator-front-proxy.kubeconfig
+ register: front_proxy_kubeconfig
+
+- name: Copy api-client config from first master
+ fetch:
+ src: "/etc/origin/master/{{ item }}"
+ dest: "{{ certtemp.stdout }}/{{ item }}"
+ flat: yes
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ with_items:
+ - aggregator-front-proxy.crt
+ - aggregator-front-proxy.key
+ - aggregator-front-proxy.kubeconfig
+ when:
+ - not front_proxy_kubeconfig.stat.exists
+
+- name: Copy api-client config to host
+ copy:
+ src: "{{ certtemp.stdout }}/{{ item }}"
+ dest: "/etc/origin/master/{{ item }}"
+ with_items:
+ - aggregator-front-proxy.crt
+ - aggregator-front-proxy.key
+ - aggregator-front-proxy.kubeconfig
+ when:
+ - not front_proxy_kubeconfig.stat.exists
+
+- name: copy tech preview extension file for service console UI
+ copy:
+ src: openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
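+# Wire the aggregator into master-config.yaml: point the proxy client at the
+# front-proxy certificates, trust the front-proxy CA for request-header
+# authentication, and enable the PodPreset admission plugin along with the
+# settings.k8s.io/v1alpha1 API group.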
+- name: Update master config
+ yedit:
+ state: present
+ src: /etc/origin/master/master-config.yaml
+ edits:
+ - key: aggregatorConfig.proxyClientInfo.certFile
+ value: aggregator-front-proxy.crt
+ - key: aggregatorConfig.proxyClientInfo.keyFile
+ value: aggregator-front-proxy.key
+ - key: authConfig.requestHeader.clientCA
+ value: front-proxy-ca.crt
+ - key: authConfig.requestHeader.clientCommonNames
+ value: [aggregator-front-proxy]
+ - key: authConfig.requestHeader.usernameHeaders
+ value: [X-Remote-User]
+ - key: authConfig.requestHeader.groupHeaders
+ value: [X-Remote-Group]
+ - key: authConfig.requestHeader.extraHeaderPrefixes
+ value: [X-Remote-Extra-]
+ - key: assetConfig.extensionScripts
+ value: [/etc/origin/master/openshift-ansible-catalog-console.js]
+ - key: kubernetesMasterConfig.apiServerArguments.runtime-config
+ value: [apis/settings.k8s.io/v1alpha1=true]
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.kind
+ value: DefaultAdmissionConfig
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion
+ value: v1
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.disable
+ value: false
+ register: yedit_output
+
+# Restart the master services; the including play runs with serial: 1, so the
+# masters are bounced one at a time.
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.cluster_method == 'native'
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+ when:
+ - yedit_output.changed
+
+- name: Delete temp directory
+ file:
+ name: "{{ certtemp.stdout }}"
+ state: absent
+ changed_when: False
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index 000e46e80..ce672daf5 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -1,6 +1,26 @@
---
+- name: NFS Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set NFS install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_nfs: "In Progress"
+ aggregate: false
+
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: openshift_facts
+ - role: os_firewall
- role: openshift_storage_nfs
+
+- name: NFS Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set NFS install 'Complete'
+ set_stats:
+ data:
+ installer_phase_nfs: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
deleted file mode 100644
index b1e35e4b1..000000000
--- a/playbooks/common/openshift-nfs/service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Populate g_service_nfs host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nfs
- add_host:
- name: "{{ item }}"
- groups: g_service_nfs
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on nfs instance(s)
- hosts: g_service_nfs
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=nfs-server state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml
new file mode 100644
index 000000000..ac757397b
--- /dev/null
+++ b/playbooks/common/openshift-node/additional_config.yml
@@ -0,0 +1,64 @@
+---
+- name: create additional node network plugin groups
+ hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}"
+ tasks:
+ # Creating these node groups will prevent a ton of skipped tasks.
+ # Create group for flannel nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_flannel | default(False)) | ternary('flannel','nothing') }}
+ changed_when: False
+ # Create group for calico nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_calico | default(False)) | ternary('calico','nothing') }}
+ changed_when: False
+ # Create group for nuage nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_nuage | default(False)) | ternary('nuage','nothing') }}
+ changed_when: False
+ # Create group for contiv nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }}
+ changed_when: False
+ # Create group for kuryr nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }}
+ changed_when: False
+
+- include: etcd_client_config.yml
+ vars:
+ openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr"
+
+- name: Additional node config
+ hosts: oo_nodes_use_flannel
+ roles:
+ - role: flannel
+ etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ when: openshift_use_flannel | default(false) | bool
+
+- name: Additional node config
+ hosts: oo_nodes_use_calico
+ roles:
+ - role: calico
+ when: openshift_use_calico | default(false) | bool
+
+- name: Additional node config
+ hosts: oo_nodes_use_nuage
+ roles:
+ - role: nuage_node
+ when: openshift_use_nuage | default(false) | bool
+
+- name: Additional node config
+ hosts: oo_nodes_use_contiv
+ roles:
+ - role: contiv
+ contiv_role: netplugin
+ when: openshift_use_contiv | default(false) | bool
+
+- name: Configure Kuryr node
+ hosts: oo_nodes_use_kuryr
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: node
+ when: openshift_use_kuryr | default(false) | bool
diff --git a/playbooks/common/openshift-node/certificates.yml b/playbooks/common/openshift-node/certificates.yml
new file mode 100644
index 000000000..908885ee6
--- /dev/null
+++ b/playbooks/common/openshift-node/certificates.yml
@@ -0,0 +1,8 @@
+---
+- name: Create OpenShift certificates for node hosts
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_node_certificates
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ when: not openshift_node_bootstrap | default(false) | bool
diff --git a/playbooks/common/openshift-node/clean_image.yml b/playbooks/common/openshift-node/clean_image.yml
new file mode 100644
index 000000000..38753d0af
--- /dev/null
+++ b/playbooks/common/openshift-node/clean_image.yml
@@ -0,0 +1,10 @@
+---
+- name: Configure nodes
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ tasks:
+ - name: Remove any ansible facts created during AMI creation
+ file:
+ path: "/etc/ansible/facts.d/{{ item }}"
+ state: absent
+ with_items:
+ - openshift.fact
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index acebabc91..4f8f98aef 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,98 +1,34 @@
---
-- name: Gather and set facts for node hosts
- hosts: oo_nodes_to_config
- vars:
- t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
- pre_tasks:
- - set_fact:
- openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}"
- when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != ""
- roles:
- - openshift_facts
+- name: Node Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
tasks:
- # Since the master is generating the node certificates before they are
- # configured, we need to make sure to set the node properties beforehand if
- # we do not want the defaults
- - openshift_facts:
- role: node
- local_facts:
- labels: "{{ openshift_node_labels | default(None) }}"
- annotations: "{{ openshift_node_annotations | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+ - name: Set Node install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_node: "In Progress"
+ aggregate: false
-- name: Evaluate node groups
- hosts: localhost
- become: no
- connection: local
- tasks:
- - name: Evaluate oo_containerized_master_nodes
- add_host:
- name: "{{ item }}"
- groups: oo_containerized_master_nodes
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
- when: hostvars[item].openshift is defined and hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
- changed_when: False
+- include: certificates.yml
+
+- include: setup.yml
+
+- include: containerized_nodes.yml
+
+- include: configure_nodes.yml
+
+- include: additional_config.yml
-- name: Configure containerized nodes
- hosts: oo_containerized_master_nodes
- serial: 1
- vars:
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
- roles:
- - role: openshift_node
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+- include: manage_node.yml
-- name: Configure nodes
- hosts: oo_nodes_to_config:!oo_containerized_master_nodes
- vars:
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
- roles:
- - role: openshift_node
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+- include: enable_excluders.yml
-- name: Additional node config
- hosts: oo_nodes_to_config
- vars:
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- roles:
- - role: flannel
- etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
- when: openshift.common.use_flannel | bool
- - role: calico
- when: openshift.common.use_calico | bool
- - role: nuage_node
- when: openshift.common.use_nuage | bool
- - role: contiv
- contiv_role: netplugin
- when: openshift.common.use_contiv | bool
- - role: nickhammond.logrotate
- - role: openshift_manage_node
- openshift_master_host: "{{ groups.oo_first_master.0 }}"
+- name: Node Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
tasks:
- - name: Create group for deployment type
- group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
- changed_when: False
+ - name: Set Node install 'Complete'
+ set_stats:
+ data:
+ installer_phase_node: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/common/openshift-node/configure_nodes.yml
new file mode 100644
index 000000000..17259422d
--- /dev/null
+++ b/playbooks/common/openshift-node/configure_nodes.yml
@@ -0,0 +1,17 @@
+---
+- name: Configure nodes
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_node
+ - role: tuned
+ - role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-node/containerized_nodes.yml b/playbooks/common/openshift-node/containerized_nodes.yml
new file mode 100644
index 000000000..6fac937e3
--- /dev/null
+++ b/playbooks/common/openshift-node/containerized_nodes.yml
@@ -0,0 +1,19 @@
+---
+- name: Configure containerized nodes
+ hosts: oo_containerized_master_nodes
+ serial: 1
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+
+ roles:
+ - role: os_firewall
+ - role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ - role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-node/enable_excluders.yml b/playbooks/common/openshift-node/enable_excluders.yml
new file mode 100644
index 000000000..5288b14f9
--- /dev/null
+++ b/playbooks/common/openshift-node/enable_excluders.yml
@@ -0,0 +1,8 @@
+---
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-node/etcd_client_config.yml b/playbooks/common/openshift-node/etcd_client_config.yml
new file mode 100644
index 000000000..c3fa38a81
--- /dev/null
+++ b/playbooks/common/openshift-node/etcd_client_config.yml
@@ -0,0 +1,11 @@
+---
+- name: etcd_client node config
+ hosts: "{{ openshift_node_scale_up_group | default('this_group_does_not_exist') }}"
+ roles:
+ - role: openshift_facts
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
+ etcd_cert_prefix: flannel.etcd-
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
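
The hosts: pattern defaults to a group name that should never exist, so running this file without openshift_node_scale_up_group defined simply matches zero hosts and the play is skipped. The same guard in isolation (my_optional_group is a hypothetical variable name):

---
- name: Play that only runs when the caller names a group
  hosts: "{{ my_optional_group | default('this_group_does_not_exist') }}"
  gather_facts: false
  tasks:
  - name: Runs only for hosts in the caller-supplied group
    debug:
      msg: "scale-up work on {{ inventory_hostname }}"

Invoked with -e my_optional_group=new_nodes the play targets that group; without it, Ansible warns that the pattern matched no hosts and moves on.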
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
new file mode 100644
index 000000000..30651a1df
--- /dev/null
+++ b/playbooks/common/openshift-node/image_prep.yml
@@ -0,0 +1,24 @@
+---
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: evaluate the groups
+ include: ../openshift-cluster/evaluate_groups.yml
+
+- name: initialize the facts
+ include: ../openshift-cluster/initialize_facts.yml
+
+- name: initialize the repositories
+ include: ../openshift-cluster/initialize_openshift_repos.yml
+
+- name: run node config setup
+ include: setup.yml
+
+- name: run node config
+ include: configure_nodes.yml
+
+- name: Re-enable excluders
+ include: enable_excluders.yml
+
+- name: Remove any undesired artifacts from build
+ include: clean_image.yml
diff --git a/playbooks/common/openshift-node/manage_node.yml b/playbooks/common/openshift-node/manage_node.yml
new file mode 100644
index 000000000..f48a19a9c
--- /dev/null
+++ b/playbooks/common/openshift-node/manage_node.yml
@@ -0,0 +1,12 @@
+---
+- name: Additional node config
+ hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}"
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ roles:
+ - role: openshift_manage_node
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
+ tasks:
+ - name: Create group for deployment type
+ group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
+ changed_when: False
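
The group_by task buckets every node into a dynamic group named for its deployment type (for example oo_nodes_deployment_type_origin), which later plays can target directly. A self-contained sketch of that idiom, using a hypothetical my_flavor variable:

---
- name: Bucket hosts by a variable
  hosts: all
  gather_facts: false
  tasks:
  - name: Create one group per flavor (my_flavor is hypothetical)
    group_by:
      key: flavor_{{ my_flavor | default('unknown') }}
    changed_when: false

- name: Act on a single bucket
  hosts: flavor_unknown
  gather_facts: false
  tasks:
  - name: Runs only for hosts that had no my_flavor set
    debug:
      msg: "{{ inventory_hostname }} fell into the default bucket"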
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml
index 0014a5dbd..b3a7399dc 100644
--- a/playbooks/common/openshift-node/network_manager.yml
+++ b/playbooks/common/openshift-node/network_manager.yml
@@ -1,4 +1,6 @@
---
+- include: ../openshift-cluster/evaluate_groups.yml
+
- name: Install and configure NetworkManager
hosts: oo_all_hosts
become: yes
diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml
index 01cf948e0..c3beb59b7 100644
--- a/playbooks/common/openshift-node/restart.yml
+++ b/playbooks/common/openshift-node/restart.yml
@@ -11,6 +11,10 @@
service:
name: docker
state: restarted
+ register: l_docker_restart_docker_in_node_result
+ until: not l_docker_restart_docker_in_node_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
@@ -23,7 +27,6 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master"
- "{{ openshift.common.service_type }}-master-api"
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
@@ -36,6 +39,7 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
- name: restart node
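
The restart.yml changes wrap the docker restart in Ansible's register/until retry idiom (up to 3 retries, 30 seconds apart) and lengthen the master API wait_for timeout to 600 seconds, since masters can take a while to come back after a docker bounce. The retry idiom in isolation (a sketch; /usr/bin/true stands in for the real restart):

---
- name: Demonstrate retrying a flaky action
  hosts: all
  gather_facts: false
  tasks:
  - name: Attempt the action, retrying up to 3 times, 30s apart
    command: /usr/bin/true
    register: l_result
    until: not l_result | failed
    retries: 3
    delay: 30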
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
deleted file mode 100644
index 40da8990d..000000000
--- a/playbooks/common/openshift-node/scaleup.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
-- name: Gather and set facts for first master
- hosts: oo_first_master
- vars:
- openshift_master_count: "{{ groups.oo_masters | length }}"
- pre_tasks:
- - set_fact:
- openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
- when: openshift_master_default_subdomain is not defined
- roles:
- - openshift_master_facts
-
-- name: Configure docker hosts
- hosts: oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
-
-- name: Disable excluders
- hosts: oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
-- include: ../openshift-node/config.yml
-
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
deleted file mode 100644
index 130a5416f..000000000
--- a/playbooks/common/openshift-node/service.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on node instance(s)
- hosts: g_service_nodes
- connection: ssh
- gather_facts: no
- tasks:
- - name: Change state on node instance(s)
- service:
- name: "{{ service_type }}-node"
- state: "{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/setup.yml b/playbooks/common/openshift-node/setup.yml
new file mode 100644
index 000000000..794c03a67
--- /dev/null
+++ b/playbooks/common/openshift-node/setup.yml
@@ -0,0 +1,27 @@
+---
+- name: Disable excluders
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
+- name: Evaluate node groups
+ hosts: localhost
+ become: no
+ connection: local
+ tasks:
+ - name: Evaluate oo_containerized_master_nodes
+ add_host:
+ name: "{{ item }}"
+ groups: oo_containerized_master_nodes
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
+ when:
+ - hostvars[item].openshift is defined
+ - hostvars[item].openshift.common is defined
+ - hostvars[item].openshift.common.is_containerized | bool
+ - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+ changed_when: False
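
Taken together, this add_host loop and the two new config playbooks split the node set: hosts that appear in both oo_nodes_to_config and oo_masters_to_config and are containerized land in oo_containerized_master_nodes and are configured serially, while configure_nodes.yml targets oo_nodes_to_config:!oo_containerized_master_nodes to skip them. A sketch of the intersection and exclusion host patterns involved (group_a and group_b are hypothetical):

---
- name: Hosts in group_a that are also in group_b
  hosts: group_a:&group_b
  gather_facts: false
  tasks:
  - name: Intersection pattern
    debug:
      msg: "{{ inventory_hostname }} is in both groups"

- name: Hosts in group_a but not in group_b
  hosts: group_a:!group_b
  gather_facts: false
  tasks:
  - name: Exclusion pattern
    debug:
      msg: "{{ inventory_hostname }} is only in group_a"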