Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml | 16
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example | 24
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 41
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 36
-rw-r--r--  playbooks/deploy_cluster.yml | 8
-rw-r--r--  playbooks/gcp/openshift-cluster/build_base_image.yml | 3
-rw-r--r--  playbooks/init/base_packages.yml | 1
-rw-r--r--  playbooks/init/basic_facts.yml | 8
-rw-r--r--  playbooks/init/evaluate_groups.yml | 6
-rw-r--r--  playbooks/openshift-hosted/private/openshift_default_storage_class.yml | 4
-rw-r--r--  playbooks/openshift-logging/private/config.yml | 1
-rw-r--r--  playbooks/openshift-master/private/additional_config.yml | 1
-rw-r--r--  playbooks/openshift-master/private/config.yml | 3
-rw-r--r--  playbooks/openshift-master/private/restart.yml | 9
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml | 1
-rw-r--r--  playbooks/openshift-master/private/tasks/restart_services.yml | 4
-rw-r--r--  playbooks/openshift-master/private/tasks/wire_aggregator.yml | 86
-rw-r--r--  playbooks/openshift-metrics/private/config.yml | 1
-rw-r--r--  playbooks/openshift-prometheus/private/uninstall.yml | 8
-rw-r--r--  playbooks/openshift-prometheus/uninstall.yml | 2
-rwxr-xr-x  playbooks/openstack/inventory.py | 48
33 files changed, 245 insertions, 120 deletions
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
deleted file mode 100644
index faeb332ad..000000000
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: masters[0]
- roles:
- - role: openshift_logging
- openshift_hosted_logging_cleanup: no
-
-- name: Update master-config for publicLoggingURL
- hosts: masters:!masters[0]
- pre_tasks:
- - set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
- tasks:
- - import_role:
- name: openshift_logging
- tasks_from: update_master_config
- when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index f6b1a6b5d..78484fdbd 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -21,6 +21,12 @@ openshift_release: # v3.7
# This will be dependent on the version provided by the yum repository
openshift_pkg_version: # -3.7.0
+# OpenShift api port
+# Fulfills a chicken/egg scenario with how Ansible treats host inventory files
+# and extra_vars. This is used for SecurityGroups and ELB listeners, as well as
+# an override of the installer inventory openshift_master_api_port key.
+# openshift_master_api_port: 8443
+
# specify a clusterid
# This value is also used as the default value for many other components.
#openshift_aws_clusterid: default
@@ -41,11 +47,27 @@ openshift_pkg_version: # -3.7.0
# a vpc, set this to false.
#openshift_aws_create_vpc: true
+# when openshift_aws_create_vpc is true (the default), the VPC defined in
+# openshift_aws_vpc will be created
+#openshift_aws_vpc:
+# name: "{{ openshift_aws_vpc_name }}"
+# cidr: 172.31.0.0/16
+# subnets:
+# us-east-1:
+# - cidr: 172.31.48.0/20
+# az: "us-east-1c"
+# default_az: true
+# - cidr: 172.31.32.0/20
+# az: "us-east-1e"
+# - cidr: 172.31.16.0/20
+# az: "us-east-1a"
+
# Name of the vpc. Needs to be set if using a pre-existing vpc.
#openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
-# vpc + subnet.
+# vpc + subnet. Otherwise the subnet with 'default_az' set will be used (see
+# the example VPC structure above).
#openshift_aws_subnet_az:
# -------------- #
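The commented openshift_aws_vpc example above doubles as documentation for the default_az fallback described next to openshift_aws_subnet_az. A minimal sketch, assuming a vpc dict of the same shape, of how the default subnet could be resolved (the function name and data are illustrative, not part of the playbooks):

    # Hypothetical consumer of a VPC definition shaped like the example above.
    vpc = {
        "name": "default",
        "cidr": "172.31.0.0/16",
        "subnets": {
            "us-east-1": [
                {"cidr": "172.31.48.0/20", "az": "us-east-1c", "default_az": True},
                {"cidr": "172.31.32.0/20", "az": "us-east-1e"},
                {"cidr": "172.31.16.0/20", "az": "us-east-1a"},
            ],
        },
    }

    def default_subnet_az(vpc, region):
        """Return the az of the subnet flagged default_az, or None."""
        for subnet in vpc["subnets"].get(region, []):
            if subnet.get("default_az"):
                return subnet["az"]
        return None

    print(default_subnet_az(vpc, "us-east-1"))  # -> us-east-1c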
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 869e185af..c8f397186 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -12,3 +12,5 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+
+- import_playbook: ../../../../openshift-master/private/restart.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index f790fd98d..de612da21 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -6,7 +6,9 @@
hosts: oo_first_master
roles:
- role: openshift_web_console
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift_upgrade_target is version_compare('3.9','>=')
- name: Upgrade default router and default registry
hosts: oo_first_master
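The new guard skips the openshift_web_console role unless the upgrade targets 3.9 or later. Roughly equivalent logic in Python, with packaging.version standing in for Ansible's version_compare test (values illustrative):

    from packaging.version import Version

    def web_console_should_run(install_flag, upgrade_target):
        # Mirrors the two when: conditions added above.
        return install_flag and Version(upgrade_target) >= Version("3.9")

    print(web_console_should_run(True, "3.9"))  # True  -> role runs
    print(web_console_should_run(True, "3.7"))  # False -> skipped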
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index da63450b8..edc541ef9 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -49,7 +49,7 @@
# to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
+ # openshift_protect_installed_version is passed in via upgrade_control_plane.yml
# l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
# l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
@@ -60,7 +60,7 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when:
- l_upgrade_nodes_only | default(False) | bool
- - openshift.common.version != openshift_version
+ - not openshift.common.version | match(openshift_version)
# If we're only upgrading nodes, skip this.
- import_playbook: ../../../../openshift-master/private/validate_restart.yml
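The node-only guard now uses Ansible's match test rather than strict string equality, so a requested release prefix such as 3.9 no longer trips the failure when the installed version reports 3.9.0. A rough Python equivalent (versions illustrative; match performs an anchored regex match, so the dot is technically a wildcard):

    import re

    installed = "3.9.0"   # openshift.common.version
    requested = "3.9"     # openshift_version

    print(installed != requested)                # True  -> old check would fail
    print(bool(re.match(requested, installed)))  # True  -> new check passes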
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 693ab2d96..5ee8a9d78 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -92,3 +92,25 @@
state: started
enabled: yes
with_items: "{{ master_services }}"
+
+# Until openshift-ansible can determine which host is the CA host, we
+# must (unfortunately) ensure that the first host in the etcd group is
+# the etcd CA host.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1469358
+- name: Verify we can proceed on first etcd
+ hosts: oo_first_etcd
+ gather_facts: no
+ tasks:
+ - name: Ensure CA exists on first etcd
+ stat:
+ path: /etc/etcd/generated_certs
+ register: __etcd_ca_stat
+
+ - fail:
+ msg: >
+ In order to correct an etcd certificate signing problem,
+ upgrading may require re-generating etcd certificates. Please
+ ensure that the /etc/etcd/generated_certs directory exists on
+ the first host defined in your [etcd] group.
+ when:
+ - not __etcd_ca_stat.stat.exists | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index e89f06f17..c27118f6f 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,6 +2,30 @@
###############################################################################
# Upgrade Masters
###############################################################################
+
+# Prior to 3.6, openshift-ansible created etcd serving certificates
+# without a SubjectAlternativeName entry for the system hostname. The
+# SAN list in Go 1.8 is now (correctly) authoritative and since
+# openshift-ansible configures masters to talk to etcd hostnames
+# rather than IP addresses, we must correct etcd certificates.
+#
+# This play examines the etcd serving certificate SANs on each etcd
+# host and records whether or not the system hostname is missing.
+- name: Examine etcd serving certificate SAN
+ hosts: oo_etcd_to_config
+ tasks:
+ - slurp:
+ src: /etc/etcd/server.crt
+ register: etcd_serving_cert
+ - set_fact:
+ __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"
+
+# Redeploy etcd certificates when hostnames were missing from etcd
+# serving certificate SANs.
+- import_playbook: ../../../openshift-etcd/redeploy-certificates.yml
+ when:
+ - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
- name: Backup and upgrade etcd
import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
@@ -48,8 +72,6 @@
# support for optional hooks to be defined.
- name: Upgrade master
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
roles:
- openshift_facts
@@ -310,13 +332,8 @@
- import_role:
name: openshift_node
tasks_from: upgrade.yml
- - name: Set node schedulability
- oc_adm_manage_node:
- node: "{{ openshift.node.nodename | lower }}"
- schedulable: True
- delegate_to: "{{ groups.oo_first_master.0 }}"
- retries: 10
- delay: 5
- register: node_schedulable
- until: node_schedulable is succeeded
- when: node_unschedulable is changed
+ - import_role:
+ name: openshift_manage_node
+ tasks_from: config.yml
+ vars:
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
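The SAN-examination play slurps /etc/etcd/server.crt and feeds it through the lib_utils_oo_parse_certificate_san filter. Assuming that filter yields the certificate's DNS SubjectAlternativeName entries, the per-host check reduces to a sketch like this one built on the cryptography package (hostname and path are illustrative):

    from cryptography import x509

    def cert_lacks_hostname(pem_bytes, hostname):
        """True when hostname is absent from the cert's DNS SAN entries."""
        cert = x509.load_pem_x509_certificate(pem_bytes)
        try:
            san = cert.extensions.get_extension_for_class(
                x509.SubjectAlternativeName).value
        except x509.ExtensionNotFound:
            return True  # no SAN extension at all -> hostname is missing
        return hostname not in san.get_values_for_type(x509.DNSName)

    with open("/etc/etcd/server.crt", "rb") as f:
        print(cert_lacks_hostname(f.read(), "etcd-0.example.com"))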
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 850442b3b..915fae9fd 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -50,16 +50,11 @@
- import_role:
name: openshift_node
tasks_from: upgrade.yml
- - name: Set node schedulability
- oc_adm_manage_node:
- node: "{{ openshift.node.nodename | lower }}"
- schedulable: True
- delegate_to: "{{ groups.oo_first_master.0 }}"
- retries: 10
- delay: 5
- register: node_schedulable
- until: node_schedulable is succeeded
- when: node_unschedulable is changed
+ - import_role:
+ name: openshift_manage_node
+ tasks_from: config.yml
+ vars:
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
- name: Re-enable excluders
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index d520c6aee..a2d21b69f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -23,6 +23,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index d88880140..9aa5a3b64 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -35,6 +35,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 4daa9e490..cc2ec2709 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -23,6 +23,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index ce069e2d0..b1ecc75d3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -35,6 +35,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index a9bf354cc..a73b7d63a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -23,6 +23,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 3f26a6297..723b2e533 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -36,6 +36,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 20e0c165e..bf6e8605e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -20,6 +20,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 0f48725f6..c8a42322d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -25,10 +25,18 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
openshift_release: '3.8'
- _requested_pkg_version: "{{openshift_pkg_version if openshift_pkg_version is defined else omit }}"
- _requested_image_tag: "{{openshift_image_tag if openshift_image_tag is defined else omit }}"
+ _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+ _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+ l_double_upgrade_cp: True
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+ - name: set l_force_image_tag_to_version = True
+ set_fact:
+ # Need to set this during 3.8 upgrade to ensure image_tag is set correctly
+ # to match 3.8 version
+ l_force_image_tag_to_version: True
+ when: _requested_image_tag is defined
+
- import_playbook: ../pre/config.yml
# These vars are meant to exclude oo_nodes from plays that would otherwise include
# them by default.
@@ -41,6 +49,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
- name: Flag pre-upgrade checks complete for hosts without errors 3.8
@@ -55,6 +64,7 @@
- import_playbook: ../upgrade_control_plane.yml
vars:
openshift_release: '3.8'
+ openshift_pkg_version: ''
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
## 3.8 upgrade complete we should now be able to upgrade to 3.9
@@ -68,7 +78,20 @@
openshift_upgrade_min: '3.8'
openshift_release: '3.9'
openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
- openshift_image_tag: "{{ _requested_image_tag | default('v3.9') }}"
+ # Set the user's specified image_tag for 3.9 upgrade if it was provided.
+ - set_fact:
+ openshift_image_tag: "{{ _requested_image_tag }}"
+ l_force_image_tag_to_version: False
+ when: _requested_image_tag is defined
+ # If the user didn't specify an image_tag, we need to force update image_tag
+ # because it will have already been set during 3.8. If we aren't running
+ # a double upgrade, then we can preserve image_tag because it will still
+ # be the user provided value.
+ - set_fact:
+ l_force_image_tag_to_version: True
+ when:
+ - l_double_upgrade_cp is defined and l_double_upgrade_cp
+ - _requested_image_tag is not defined
- import_playbook: ../pre/config.yml
# These vars are meant to exclude oo_nodes from plays that would otherwise include
@@ -82,6 +105,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
@@ -110,3 +134,9 @@
state: started
- import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+ tasks:
+ - import_role:
+ name: openshift_web_console
+ tasks_from: remove_old_asset_config
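Because a 3.7 control plane must hop through 3.8 before reaching 3.9, the interplay of _requested_image_tag, l_double_upgrade_cp, and l_force_image_tag_to_version above is easy to lose track of. A condensed, illustrative restatement of the decision logic (not the playbook's literal implementation):

    def force_image_tag(phase, user_tag_given, double_upgrade):
        """Whether openshift_image_tag must be forced to match the release."""
        if phase == "3.8":
            # The intermediate hop overrides any user-supplied tag so
            # images line up with the 3.8 packages.
            return user_tag_given
        # phase == "3.9": honor the user's tag if one was given; otherwise
        # the tag left over from the 3.8 hop must be force-updated.
        return double_upgrade and not user_tag_given

    print(force_image_tag("3.8", True, True))   # True
    print(force_image_tag("3.9", True, True))   # False
    print(force_image_tag("3.9", False, True))  # True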
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 361553ee4..c8e30ddbc 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -6,11 +6,3 @@
- import_playbook: openshift-node/private/config.yml
- import_playbook: common/private/components.yml
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/gcp/openshift-cluster/build_base_image.yml b/playbooks/gcp/openshift-cluster/build_base_image.yml
index 75d0ddf9d..8e9b0024a 100644
--- a/playbooks/gcp/openshift-cluster/build_base_image.yml
+++ b/playbooks/gcp/openshift-cluster/build_base_image.yml
@@ -90,6 +90,8 @@
repo_gpgcheck: no
state: present
when: ansible_os_family == "RedHat"
+ - name: Accept GPG keys for the repos
+ command: yum -q makecache -y --disablerepo='*' --enablerepo='google-cloud,jdetiber-qemu-user-static'
- name: Install qemu-user-static
package:
name: qemu-user-static
@@ -121,7 +123,6 @@
with_items:
# required by Ansible
- PyYAML
- - docker
- google-compute-engine
- google-compute-engine-init
- google-config
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index e1052fb6c..0a730a88a 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -16,6 +16,7 @@
- iproute
- "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+ - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else omit }}"
- yum-utils
register: result
until: result is succeeded
diff --git a/playbooks/init/basic_facts.yml b/playbooks/init/basic_facts.yml
index 06a4e7291..a9bf06693 100644
--- a/playbooks/init/basic_facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -67,3 +67,11 @@
first_master_client_binary: "{{ openshift_client_binary }}"
#Some roles may require this to be set for first master
openshift_client_binary: "{{ openshift_client_binary }}"
+
+- name: Disable web console if required
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ openshift_web_console_install: False
+ when: openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index 924ae481a..e8bf1892c 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -45,7 +45,11 @@
- name: Evaluate groups - Fail if no etcd hosts group is defined
fail:
msg: >
- Running etcd as an embedded service is no longer supported.
+ Running etcd as an embedded service is no longer supported. If this is a
+ new install please define an 'etcd' group with either one, three or five
+ hosts. These hosts may be the same hosts as your masters. If this is an
+ upgrade please see https://docs.openshift.com/container-platform/latest/install_config/upgrading/migrating_embedded_etcd.html
+ for documentation on how to migrate from embedded to external etcd.
when:
- g_etcd_hosts | default([]) | length not in [5,3,1]
- not (openshift_node_bootstrap | default(False))
diff --git a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
index 62fe0dd60..c59ebcead 100644
--- a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
+++ b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
@@ -3,4 +3,6 @@
hosts: oo_first_master
roles:
- role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
+ when:
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind in ['aws','gce','openstack','vsphere']
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index d6b26647c..07aa8bfde 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -24,6 +24,7 @@
- import_role:
name: openshift_logging
tasks_from: update_master_config
+ when: not openshift.common.version_gte_3_9
- name: Logging Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index 85be0e600..ca514ed26 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -16,7 +16,6 @@
vars:
cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
etcd_urls: "{{ openshift.master.etcd_urls }}"
- openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
roles:
- role: openshift_project_request_template
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 153ea9993..d2fc2eed8 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -78,7 +78,6 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Inspect state of first master config settings
@@ -166,7 +165,6 @@
hosts: oo_masters_to_config
any_errors_fatal: true
vars:
- openshift_master_ha: "{{ openshift.master.ha }}"
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
@@ -186,6 +184,7 @@
- role: openshift_buildoverrides
- role: nickhammond.logrotate
- role: openshift_master
+ openshift_master_ha: "{{ (groups.oo_masters | length > 1) | bool }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml
index 5cb284935..17d90533c 100644
--- a/playbooks/openshift-master/private/restart.yml
+++ b/playbooks/openshift-master/private/restart.yml
@@ -3,16 +3,13 @@
- name: Restart masters
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
- handlers:
- - import_tasks: ../../../roles/openshift_master/handlers/main.yml
roles:
- openshift_facts
post_tasks:
- include_tasks: tasks/restart_hosts.yml
when: openshift_rolling_restart_mode | default('services') == 'system'
-
- - include_tasks: tasks/restart_services.yml
+ - import_role:
+ name: openshift_master
+ tasks_from: restart.yml
when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 007b23ea3..20ebf70d3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -8,7 +8,6 @@
- openshift_facts:
role: master
local_facts:
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Update master count
modify_yaml:
diff --git a/playbooks/openshift-master/private/tasks/restart_services.yml b/playbooks/openshift-master/private/tasks/restart_services.yml
deleted file mode 100644
index cf2c282e3..000000000
--- a/playbooks/openshift-master/private/tasks/restart_services.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- import_role:
- name: openshift_master
- tasks_from: restart.yml
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 59e2b515c..cc812c300 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -142,11 +142,6 @@
state: absent
changed_when: False
-- name: Setup extension file for service console UI
- template:
- src: ../templates/openshift-ansible-catalog-console.js
- dest: /etc/origin/master/openshift-ansible-catalog-console.js
-
- name: Update master config
yedit:
state: present
@@ -166,8 +161,6 @@
value: [X-Remote-Group]
- key: authConfig.requestHeader.extraHeaderPrefixes
value: [X-Remote-Extra-]
- - key: assetConfig.extensionScripts
- value: [/etc/origin/master/openshift-ansible-catalog-console.js]
- key: kubernetesMasterConfig.apiServerArguments.runtime-config
value: [apis/settings.k8s.io/v1alpha1=true]
- key: admissionConfig.pluginConfig.PodPreset.configuration.kind
@@ -178,37 +171,50 @@
value: false
register: yedit_output
-#restart master serially here
-- name: restart master api
- systemd: name={{ openshift_service_type }}-master-api state=restarted
- when:
- - yedit_output.changed
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
- command: "systemctl restart {{ openshift_service_type }}-master-controllers"
- retries: 3
- delay: 5
- register: result
- until: result.rc == 0
- when:
- - yedit_output.changed
+# Only add the catalog extension script if not 3.9. From 3.9 on, the console
+# can discover if template service broker is running.
+- when: not openshift.common.version_gte_3_9
+ block:
+ - name: Setup extension file for service console UI
+ template:
+ src: ../templates/openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+ - name: Update master config
+ yedit:
+ state: present
+ src: /etc/origin/master/master-config.yaml
+ key: assetConfig.extensionScripts
+ value: [/etc/origin/master/openshift-ansible-catalog-console.js]
+ register: yedit_asset_config_output
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
- when:
- - yedit_output.changed
+#restart master serially here
+- when: yedit_output.changed or (yedit_asset_config_output is defined and yedit_asset_config_output.changed)
+ block:
+ - name: restart master api
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
+
+ # We retry the controllers because the API may not be 100% initialized yet.
+ - name: restart master controllers
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
+
+ - name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
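The regrouped restart block keeps the curl-based readiness poll: hit /healthz/ready until it answers ok. A stdlib-only sketch of the same loop (URL and CA path are illustrative):

    import ssl
    import time
    import urllib.error
    import urllib.request

    def wait_for_api(api_url, ca_bundle, retries=120, delay=1):
        """Poll the healthz/ready endpoint until it returns 'ok'."""
        ctx = ssl.create_default_context(cafile=ca_bundle)
        for _ in range(retries):
            try:
                with urllib.request.urlopen(api_url + "/healthz/ready",
                                            context=ctx) as resp:
                    if resp.read().decode() == "ok":
                        return True
            except (urllib.error.URLError, OSError):
                pass
            time.sleep(delay)
        return False

    wait_for_api("https://master.example.com:8443",
                 "/etc/origin/master/ca-bundle.crt")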
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 1e237e3f0..889ea77b1 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -25,6 +25,7 @@
import_role:
name: openshift_metrics
tasks_from: update_master_config.yaml
+ when: not openshift.common.version_gte_3_9
- name: Metrics Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-prometheus/private/uninstall.yml b/playbooks/openshift-prometheus/private/uninstall.yml
new file mode 100644
index 000000000..2df39c2a8
--- /dev/null
+++ b/playbooks/openshift-prometheus/private/uninstall.yml
@@ -0,0 +1,8 @@
+---
+- name: Uninstall Prometheus
+ hosts: masters[0]
+ tasks:
+ - name: Run the Prometheus Uninstall Role Tasks
+ include_role:
+ name: openshift_prometheus
+ tasks_from: uninstall
diff --git a/playbooks/openshift-prometheus/uninstall.yml b/playbooks/openshift-prometheus/uninstall.yml
new file mode 100644
index 000000000..c92ade786
--- /dev/null
+++ b/playbooks/openshift-prometheus/uninstall.yml
@@ -0,0 +1,2 @@
+---
+- import_playbook: private/uninstall.yml
diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py
index 76e658eb7..d5a8c3e24 100755
--- a/playbooks/openstack/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -15,18 +15,10 @@ import json
import shade
-def build_inventory():
- '''Build the dynamic inventory.'''
- cloud = shade.openstack_cloud()
-
+def base_openshift_inventory(cluster_hosts):
+ '''Set the base openshift inventory.'''
inventory = {}
- # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
- # environment variable.
- cluster_hosts = [
- server for server in cloud.list_servers()
- if 'metadata' in server and 'clusterid' in server.metadata]
-
masters = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'master']
@@ -67,6 +59,34 @@ def build_inventory():
inventory['dns'] = {'hosts': dns}
inventory['lb'] = {'hosts': load_balancers}
+ return inventory
+
+
+def get_docker_storage_mountpoints(volumes):
+ '''Check volumes to see if they're being used for docker storage'''
+ docker_storage_mountpoints = {}
+ for volume in volumes:
+ if volume.metadata.get('purpose') == "openshift_docker_storage":
+ for attachment in volume.attachments:
+ if attachment.server_id in docker_storage_mountpoints:
+ docker_storage_mountpoints[attachment.server_id].append(attachment.device)
+ else:
+ docker_storage_mountpoints[attachment.server_id] = [attachment.device]
+ return docker_storage_mountpoints
+
+
+def build_inventory():
+ '''Build the dynamic inventory.'''
+ cloud = shade.openstack_cloud()
+
+ # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+ # environment variable.
+ cluster_hosts = [
+ server for server in cloud.list_servers()
+ if 'metadata' in server and 'clusterid' in server.metadata]
+
+ inventory = base_openshift_inventory(cluster_hosts)
+
for server in cluster_hosts:
if 'group' in server.metadata:
group = server.metadata.group
@@ -76,6 +96,9 @@ def build_inventory():
inventory['_meta'] = {'hostvars': {}}
+ # cinder volumes used for docker storage
+ docker_storage_mountpoints = get_docker_storage_mountpoints(cloud.list_volumes())
+
for server in cluster_hosts:
ssh_ip_address = server.public_v4 or server.private_v4
hostvars = {
@@ -111,6 +134,11 @@ def build_inventory():
if node_labels:
hostvars['openshift_node_labels'] = node_labels
+ # check for attached docker storage volumes
+ if 'os-extended-volumes:volumes_attached' in server:
+ if server.id in docker_storage_mountpoints:
+ hostvars['docker_storage_mountpoints'] = ' '.join(docker_storage_mountpoints[server.id])
+
inventory['_meta']['hostvars'][server.name] = hostvars
return inventory
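The refactor separates static group construction (base_openshift_inventory) from the Cinder volume scan (get_docker_storage_mountpoints), which makes the latter easy to exercise in isolation. A self-contained sketch with stand-in data; shade normally returns attribute-accessible objects, mimicked here with SimpleNamespace, and the helper body is condensed with setdefault:

    from types import SimpleNamespace

    def get_docker_storage_mountpoints(volumes):
        """Group docker-storage attachment devices by server id."""
        mountpoints = {}
        for volume in volumes:
            if volume.metadata.get('purpose') == "openshift_docker_storage":
                for attachment in volume.attachments:
                    mountpoints.setdefault(attachment.server_id,
                                           []).append(attachment.device)
        return mountpoints

    volumes = [
        SimpleNamespace(
            metadata={"purpose": "openshift_docker_storage"},
            attachments=[SimpleNamespace(server_id="node-1", device="/dev/vdb"),
                         SimpleNamespace(server_id="node-2", device="/dev/vdb")]),
        SimpleNamespace(
            metadata={"purpose": "glance"},
            attachments=[SimpleNamespace(server_id="node-1", device="/dev/vdc")]),
    ]

    print(get_docker_storage_mountpoints(volumes))
    # -> {'node-1': ['/dev/vdb'], 'node-2': ['/dev/vdb']}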