Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/aws/openshift-cluster/hosted.yml | 25
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 27
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml | 4
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example | 24
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 39
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 35
-rw-r--r--  playbooks/common/private/components.yml | 38
-rw-r--r--  playbooks/common/private/control_plane.yml | 34
-rw-r--r--  playbooks/container-runtime/private/build_container_groups.yml | 4
-rw-r--r--  playbooks/container-runtime/private/config.yml | 4
-rw-r--r--  playbooks/container-runtime/private/setup_storage.yml | 4
-rw-r--r--  playbooks/deploy_cluster.yml | 45
-rw-r--r--  playbooks/gcp/openshift-cluster/build_base_image.yml | 163
-rw-r--r--  playbooks/gcp/openshift-cluster/build_image.yml | 106
-rw-r--r--  playbooks/gcp/openshift-cluster/deprovision.yml | 10
-rw-r--r--  playbooks/gcp/openshift-cluster/install.yml | 33
-rw-r--r--  playbooks/gcp/openshift-cluster/install_gcp.yml | 21
-rw-r--r--  playbooks/gcp/openshift-cluster/inventory.yml | 10
-rw-r--r--  playbooks/gcp/openshift-cluster/launch.yml | 12
-rw-r--r--  playbooks/gcp/openshift-cluster/provision.yml (renamed from playbooks/gcp/provision.yml) | 9
-rw-r--r--  playbooks/gcp/openshift-cluster/publish_image.yml | 9
l---------  playbooks/gcp/openshift-cluster/roles | 1
-rw-r--r--  playbooks/init/base_packages.yml | 1
-rw-r--r--  playbooks/init/basic_facts.yml | 8
-rw-r--r--  playbooks/init/evaluate_groups.yml | 8
-rw-r--r--  playbooks/init/validate_hostnames.yml | 4
-rw-r--r--  playbooks/openshift-etcd/scaleup.yml | 47
-rw-r--r--  playbooks/openshift-master/private/tasks/wire_aggregator.yml | 86
-rw-r--r--  playbooks/openshift-prometheus/private/uninstall.yml | 8
-rw-r--r--  playbooks/openshift-prometheus/uninstall.yml | 2
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/OSEv3.yml | 2
43 files changed, 702 insertions, 173 deletions
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
deleted file mode 100644
index 9d9ed29de..000000000
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- import_playbook: ../../openshift-hosted/private/config.yml
-
-- import_playbook: ../../openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: ../../openshift-logging/private/config.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: ../../openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: ../../openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(false) | bool
-
-- import_playbook: ../../openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index a3fc82f9a..938e83f5e 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -18,29 +18,8 @@
- name: run the init
import_playbook: ../../init/main.yml
-- name: perform the installer openshift-checks
- import_playbook: ../../openshift-checks/private/install.yml
+- name: configure the control plane
+ import_playbook: ../../common/private/control_plane.yml
-- name: etcd install
- import_playbook: ../../openshift-etcd/private/config.yml
-
-- name: include nfs
- import_playbook: ../../openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- name: include loadbalancer
- import_playbook: ../../openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- name: include openshift-master config
- import_playbook: ../../openshift-master/private/config.yml
-
-- name: include master additional config
- import_playbook: ../../openshift-master/private/additional_config.yml
-
-- name: include master additional config
+- name: ensure the masters are configured as nodes
import_playbook: ../../openshift-node/private/config.yml
-
-- name: include openshift-glusterfs
- import_playbook: ../../openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index f98f5be9a..bd154fa83 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -15,5 +15,5 @@
- name: Include the accept.yml playbook to accept nodes into the cluster
import_playbook: accept.yml
-- name: Include the hosted.yml playbook to finish the hosted configuration
- import_playbook: hosted.yml
+- name: Include the components playbook to finish the hosted configuration
+ import_playbook: ../../common/private/components.yml
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index f6b1a6b5d..78484fdbd 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -21,6 +21,12 @@ openshift_release: # v3.7
# This will be dependent on the version provided by the yum repository
openshift_pkg_version: # -3.7.0
+# OpenShift api port
+# Fulfills a chicken/egg scenario with how Ansible treats host inventory file
+# and extra_vars. This is used for SecurityGroups, ELB Listeners as well as
+# an override to installer inventory openshift_master_api_port key
+# openshift_master_api_port: 8443
+
# specify a clusterid
# This value is also used as the default value for many other components.
#openshift_aws_clusterid: default
@@ -41,11 +47,27 @@ openshift_pkg_version: # -3.7.0
# a vpc, set this to false.
#openshift_aws_create_vpc: true
+# when openshift_aws_create_vpc is true (the default), the VPC defined in
+# openshift_aws_vpc will be created
+#openshift_aws_vpc:
+# name: "{{ openshift_aws_vpc_name }}"
+# cidr: 172.31.0.0/16
+# subnets:
+# us-east-1:
+# - cidr: 172.31.48.0/20
+# az: "us-east-1c"
+# default_az: true
+# - cidr: 172.31.32.0/20
+# az: "us-east-1e"
+# - cidr: 172.31.16.0/20
+# az: "us-east-1a"
+
# Name of the vpc. Needs to be set if using a pre-existing vpc.
#openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
-# vpc + subnet.
+# vpc + subnet. Otherwise will use the subnet with 'default_az' set (see above
+# example VPC structure)
#openshift_aws_subnet_az:
# -------------- #
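
As a worked example of the options documented in this file, here is a hedged sketch of a filled-in provisioning_vars.yml; every value below is a placeholder, not a recommendation:

    ---
    openshift_release: v3.7
    openshift_pkg_version: -3.7.0
    # overrides the installer inventory default of 8443 (see comment above)
    openshift_master_api_port: 443
    openshift_aws_clusterid: demo
    openshift_aws_create_vpc: true
    openshift_aws_vpc:
      name: "{{ openshift_aws_vpc_name }}"
      cidr: 172.31.0.0/16
      subnets:
        us-east-1:
        - cidr: 172.31.48.0/20
          az: "us-east-1c"
          default_az: true   # picked when openshift_aws_subnet_az is unset
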
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 869e185af..c8f397186 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -12,3 +12,5 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+
+- import_playbook: ../../../../openshift-master/private/restart.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index f790fd98d..de612da21 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -6,7 +6,9 @@
hosts: oo_first_master
roles:
- role: openshift_web_console
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift_upgrade_target is version_compare('3.9','>=')
- name: Upgrade default router and default registry
hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index da63450b8..2b27f8dd0 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -49,7 +49,7 @@
# to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
+ # openshift_protect_installed_version is passed in via upgrade_control_plane.yml
# l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
# l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 693ab2d96..5ee8a9d78 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -92,3 +92,25 @@
state: started
enabled: yes
with_items: "{{ master_services }}"
+
+# Until openshift-ansible can determine which host is the CA host, we
+# must (unfortunately) ensure that the first host in the etcd group is
+# the etcd CA host.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1469358
+- name: Verify we can proceed on first etcd
+ hosts: oo_first_etcd
+ gather_facts: no
+ tasks:
+ - name: Ensure CA exists on first etcd
+ stat:
+ path: /etc/etcd/generated_certs
+ register: __etcd_ca_stat
+
+ - fail:
+ msg: >
+ In order to correct an etcd certificate signing problem,
+ upgrading may require re-generating etcd certificates. Please
+ ensure that the /etc/etcd/generated_certs directory exists on
+ the first host defined in your [etcd] group.
+ when:
+ - not __etcd_ca_stat.stat.exists | bool
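
Because the play above can only verify, not fix, the CA host placement, inventory ordering is what actually satisfies the check. A hedged YAML-inventory sketch, host names invented: the first host under etcd must be the one holding /etc/etcd/generated_certs.

    all:
      children:
        etcd:
          hosts:
            master1.example.com:   # etcd CA host -- must stay first
            master2.example.com:
            master3.example.com:
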
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index e89f06f17..a10fd4bee 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,6 +2,30 @@
###############################################################################
# Upgrade Masters
###############################################################################
+
+# Prior to 3.6, openshift-ansible created etcd serving certificates
+# without a SubjectAlternativeName entry for the system hostname. The
+# SAN list in Go 1.8 is now (correctly) authoritative and since
+# openshift-ansible configures masters to talk to etcd hostnames
+# rather than IP addresses, we must correct etcd certificates.
+#
+# This play examines the etcd serving certificate SANs on each etcd
+# host and records whether or not the system hostname is missing.
+- name: Examine etcd serving certificate SAN
+ hosts: oo_etcd_to_config
+ tasks:
+ - slurp:
+ src: /etc/etcd/server.crt
+ register: etcd_serving_cert
+ - set_fact:
+ __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"
+
+# Redeploy etcd certificates when hostnames were missing from etcd
+# serving certificate SANs.
+- import_playbook: ../../../openshift-etcd/redeploy-certificates.yml
+ when:
+ - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
- name: Backup and upgrade etcd
import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
@@ -310,13 +334,8 @@
- import_role:
name: openshift_node
tasks_from: upgrade.yml
- - name: Set node schedulability
- oc_adm_manage_node:
- node: "{{ openshift.node.nodename | lower }}"
- schedulable: True
- delegate_to: "{{ groups.oo_first_master.0 }}"
- retries: 10
- delay: 5
- register: node_schedulable
- until: node_schedulable is succeeded
- when: node_unschedulable is changed
+ - import_role:
+ name: openshift_manage_node
+ tasks_from: config.yml
+ vars:
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
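
For a manual spot-check of the same SAN condition the play above computes with lib_utils_oo_parse_certificate_san, here is a hedged standalone sketch using openssl; the play in the diff remains the authoritative check.

    - hosts: oo_etcd_to_config
      tasks:
      - name: Dump the etcd serving certificate as text
        command: openssl x509 -in /etc/etcd/server.crt -noout -text
        register: cert_text
        changed_when: false
      - name: Report a hostname missing from the SAN list
        debug:
          msg: "{{ openshift.common.hostname }} is not in the etcd serving certificate SANs"
        when: ('DNS:' ~ openshift.common.hostname) not in cert_text.stdout
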
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 850442b3b..915fae9fd 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -50,16 +50,11 @@
- import_role:
name: openshift_node
tasks_from: upgrade.yml
- - name: Set node schedulability
- oc_adm_manage_node:
- node: "{{ openshift.node.nodename | lower }}"
- schedulable: True
- delegate_to: "{{ groups.oo_first_master.0 }}"
- retries: 10
- delay: 5
- register: node_schedulable
- until: node_schedulable is succeeded
- when: node_unschedulable is changed
+ - import_role:
+ name: openshift_manage_node
+ tasks_from: config.yml
+ vars:
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
- name: Re-enable excluders
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index d520c6aee..a2d21b69f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -23,6 +23,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index d88880140..9aa5a3b64 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -35,6 +35,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 4daa9e490..cc2ec2709 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -23,6 +23,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index ce069e2d0..b1ecc75d3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -35,6 +35,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index a9bf354cc..a73b7d63a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -23,6 +23,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 3f26a6297..723b2e533 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -36,6 +36,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 20e0c165e..bf6e8605e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -20,6 +20,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ openshift_protect_installed_version: False
- import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 0f48725f6..fe1fdefff 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -25,10 +25,18 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
openshift_release: '3.8'
- _requested_pkg_version: "{{openshift_pkg_version if openshift_pkg_version is defined else omit }}"
- _requested_image_tag: "{{openshift_image_tag if openshift_image_tag is defined else omit }}"
+ _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+ _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+ l_double_upgrade_cp: True
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+ - name: set l_force_image_tag_to_version = True
+ set_fact:
+ # Need to set this during 3.8 upgrade to ensure image_tag is set correctly
+ # to match 3.8 version
+ l_force_image_tag_to_version: True
+ when: _requested_image_tag is defined
+
- import_playbook: ../pre/config.yml
# These vars are meant to exclude oo_nodes from plays that would otherwise include
# them by default.
@@ -41,6 +49,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
- name: Flag pre-upgrade checks complete for hosts without errors 3.8
@@ -68,7 +77,20 @@
openshift_upgrade_min: '3.8'
openshift_release: '3.9'
openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
- openshift_image_tag: "{{ _requested_image_tag | default('v3.9') }}"
+ # Set the user's specified image_tag for 3.9 upgrade if it was provided.
+ - set_fact:
+ openshift_image_tag: "{{ _requested_image_tag }}"
+ l_force_image_tag_to_version: False
+ when: _requested_image_tag is defined
+ # If the user didn't specify an image_tag, we need to force update image_tag
+ # because it will have already been set during 3.8. If we aren't running
+ # a double upgrade, then we can preserve image_tag because it will still
+ # be the user provided value.
+ - set_fact:
+ l_force_image_tag_to_version: True
+ when:
+ - l_double_upgrade_cp is defined and l_double_upgrade_cp
+ - _requested_image_tag is not defined
- import_playbook: ../pre/config.yml
# These vars are meant to exclude oo_nodes from plays that would otherwise include
@@ -82,6 +104,7 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ openshift_protect_installed_version: False
- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
@@ -110,3 +133,9 @@
state: started
- import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+ tasks:
+ - import_role:
+ name: openshift_web_console
+ tasks_from: remove_old_asset_config
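
To summarize the image-tag handling added above (a reading of the plays, not new behavior): during the intermediate 3.8 leg, a user-supplied tag is overridden to match the 3.8 version; what happens on the 3.9 leg depends on the inputs:

    openshift_image_tag given?   double upgrade (start < 3.8)?   3.9 leg behavior
    yes                          yes or no                       user's tag applied, force flag False
    no                           yes                             force flag True (tag still points at 3.8)
    no                           no                              untouched, current tag kept
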
diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml
new file mode 100644
index 000000000..089645d07
--- /dev/null
+++ b/playbooks/common/private/components.yml
@@ -0,0 +1,38 @@
+---
+# These are the core component plays that configure the layers above the control
+# plane. A component is generally considered any part of OpenShift that runs on
+# top of the cluster and may be considered optional. Over time, much of OpenShift
+# above the Kubernetes apiserver and masters may be considered components.
+#
+# Preconditions:
+#
+# 1. The control plane is configured and reachable from nodes inside the cluster
+# 2. An admin kubeconfig file in /etc/origin/master/admin.kubeconfig that can
+# perform root level actions against the cluster
+# 3. On cloud providers, persistent volume provisioners are configured
+# 4. A subset of nodes is available to allow components to schedule - this must
+# include the masters and usually includes infra nodes.
+# 5. The init/main.yml playbook has been invoked
+
+- import_playbook: ../../openshift-glusterfs/private/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-hosted/private/config.yml
+
+- import_playbook: ../../openshift-web-console/private/config.yml
+ when: openshift_web_console_install | default(true) | bool
+
+- import_playbook: ../../openshift-metrics/private/config.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: ../../openshift-logging/private/config.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: ../../openshift-prometheus/private/config.yml
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: ../../openshift-service-catalog/private/config.yml
+ when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: ../../openshift-management/private/config.yml
+ when: openshift_management_install_management | default(false) | bool
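
The when: guards above mean each optional component is switched by a single inventory variable. A hedged group_vars sketch restating the defaults encoded in this file:

    openshift_web_console_install: true             # default: true
    openshift_metrics_install_metrics: false        # default: false
    openshift_logging_install_logging: false        # default: false
    openshift_hosted_prometheus_deploy: false       # default: false
    openshift_enable_service_catalog: true          # default: true
    openshift_management_install_management: false  # default: false
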
diff --git a/playbooks/common/private/control_plane.yml b/playbooks/common/private/control_plane.yml
new file mode 100644
index 000000000..0a5f1142b
--- /dev/null
+++ b/playbooks/common/private/control_plane.yml
@@ -0,0 +1,34 @@
+---
+# These are the control plane plays that configure a control plane on top of hosts
+# identified as masters. Over time, some of the pieces of the current control plane
+# may be moved to the components list.
+#
+# It is not required for any nodes to be configured, or passed to be configured,
+# when this playbook is invoked.
+#
+# Preconditions:
+#
+# 1. A set of machines have been identified to act as masters
+# 2. On cloud providers, a load balancer has been configured to point to the masters
+# and that load balancer has a DNS name
+# 3. The init/main.yml playbook has been invoked
+#
+# Postconditions:
+#
+# 1. The control plane is reachable from the outside of the cluster
+# 2. The master has an /etc/origin/master/admin.kubeconfig file that gives cluster-admin
+# access.
+
+- import_playbook: ../../openshift-checks/private/install.yml
+
+- import_playbook: ../../openshift-etcd/private/config.yml
+
+- import_playbook: ../../openshift-nfs/private/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-loadbalancer/private/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-master/private/config.yml
+
+- import_playbook: ../../openshift-master/private/additional_config.yml
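
A hedged post-flight sketch for postcondition 2 above, verifying the admin kubeconfig that the control-plane plays are expected to leave behind:

    - hosts: oo_first_master
      gather_facts: no
      tasks:
      - name: Check for the admin kubeconfig
        stat:
          path: /etc/origin/master/admin.kubeconfig
        register: admin_kc
      - name: Fail when the control plane did not produce it
        fail:
          msg: /etc/origin/master/admin.kubeconfig is missing on the first master
        when: not admin_kc.stat.exists
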
diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml
index a2361d50c..8fb7b63e8 100644
--- a/playbooks/container-runtime/private/build_container_groups.yml
+++ b/playbooks/container-runtime/private/build_container_groups.yml
@@ -1,6 +1,8 @@
---
+# l_build_container_groups_hosts is passed in via prerequisites.yml during
+# etcd scaleup plays.
- name: create oo_hosts_containerized_managed_true host group
- hosts: oo_all_hosts:!oo_nodes_to_config
+ hosts: "{{ l_build_container_groups_hosts | default('oo_all_hosts:!oo_nodes_to_config') }}"
tasks:
- group_by:
key: oo_hosts_containerized_managed_{{ (openshift_is_containerized | default(False)) | ternary('true','false') }}
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index 817a8bf30..5396df20a 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -1,9 +1,11 @@
---
# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.
- import_playbook: build_container_groups.yml
-- hosts: "{{ l_scale_up_hosts | default(l_default_container_runtime_hosts) }}"
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_runtime_hosts) }}"
vars:
l_default_container_runtime_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
roles:
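
The hosts: pattern above leans on Jinja2 default() chaining: the leftmost defined variable wins, and an undefined fallback simply falls through to the next default(). A hedged illustration with invented values:

    - hosts: localhost
      gather_facts: no
      vars:
        l_default_container_runtime_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
        l_scale_up_hosts: "oo_new_nodes"   # as a node scaleup would pass in
      tasks:
      - debug:
          # prints "oo_new_nodes": l_etcd_scale_up_hosts is undefined here,
          # so the chain falls through to l_scale_up_hosts
          msg: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_runtime_hosts) }}"
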
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
index 65630be62..586149b1d 100644
--- a/playbooks/container-runtime/private/setup_storage.yml
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -1,9 +1,11 @@
---
# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.
- import_playbook: build_container_groups.yml
-- hosts: "{{ l_scale_up_hosts | default(l_default_container_storage_hosts) }}"
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_storage_hosts) }}"
vars:
l_default_container_storage_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 5efdc486a..c8e30ddbc 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -1,49 +1,8 @@
---
- import_playbook: init/main.yml
-- import_playbook: openshift-checks/private/install.yml
-
-- import_playbook: openshift-etcd/private/config.yml
-
-- import_playbook: openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- import_playbook: openshift-master/private/config.yml
-
-- import_playbook: openshift-master/private/additional_config.yml
+- import_playbook: common/private/control_plane.yml
- import_playbook: openshift-node/private/config.yml
-- import_playbook: openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-hosted/private/config.yml
-
-- import_playbook: openshift-web-console/private/config.yml
- when: openshift_web_console_install | default(true) | bool
-
-- import_playbook: openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: openshift-logging/private/config.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(true) | bool
-
-- import_playbook: openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
+- import_playbook: common/private/components.yml
diff --git a/playbooks/gcp/openshift-cluster/build_base_image.yml b/playbooks/gcp/openshift-cluster/build_base_image.yml
new file mode 100644
index 000000000..8e9b0024a
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_base_image.yml
@@ -0,0 +1,163 @@
+---
+# This playbook ensures that a base image is up to date with all of the required settings
+- name: Launch image build instance
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Require openshift_gcp_root_image
+ fail:
+ msg: "A root OS image name or family is required for base image building. Please ensure `openshift_gcp_root_image` is defined."
+ when: openshift_gcp_root_image is undefined
+
+ - name: Create the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ disk_type: pd-ssd
+ image: "{{ openshift_gcp_root_image }}"
+ size_gb: 10
+ state: present
+
+ - name: Launch the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ machine_type: n1-standard-1
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: present
+ tags:
+ - build-image-instance
+ disk_auto_delete: false
+ disks:
+ - "{{ openshift_gcp_prefix }}build-image-instance"
+ register: gce
+
+ - add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: build_instance_ips
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for instance to respond to SSH
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 120
+ with_items: "{{ gce.instance_data }}"
+
+- name: Prepare instance content sources
+ pre_tasks:
+ - set_fact:
+ allow_rhel_subscriptions: "{{ rhsub_skip | default('no', True) | lower in ['no', 'false'] }}"
+ - set_fact:
+ using_rhel_subscriptions: "{{ (deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise'] or ansible_distribution == 'RedHat') and allow_rhel_subscriptions }}"
+ hosts: build_instance_ips
+ roles:
+ - role: rhel_subscribe
+ when: using_rhel_subscriptions
+ - role: openshift_repos
+ vars:
+ openshift_additional_repos: []
+ post_tasks:
+ - name: Add custom repositories
+ include_role:
+ name: openshift_gcp
+ tasks_from: add_custom_repositories.yml
+ - name: Add the Google Cloud repo
+ yum_repository:
+ name: google-cloud
+ description: Google Cloud Compute
+ baseurl: https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64
+ gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+ gpgcheck: yes
+ repo_gpgcheck: yes
+ state: present
+ when: ansible_os_family == "RedHat"
+ - name: Add the jdetiber-qemu-user-static copr repo
+ yum_repository:
+ name: jdetiber-qemu-user-static
+ description: QEMU user static COPR
+ baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/epel-7-$basearch/
+ gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/pubkey.gpg
+ gpgcheck: yes
+ repo_gpgcheck: no
+ state: present
+ when: ansible_os_family == "RedHat"
+ - name: Accept GPG keys for the repos
+ command: yum -q makecache -y --disablerepo='*' --enablerepo='google-cloud,jdetiber-qemu-user-static'
+ - name: Install qemu-user-static
+ package:
+ name: qemu-user-static
+ state: present
+ - name: Start and enable systemd-binfmt service
+ systemd:
+ name: systemd-binfmt
+ state: started
+ enabled: yes
+
+- name: Build image
+ hosts: build_instance_ips
+ pre_tasks:
+ - name: Set up core host GCP configuration
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_gcp_base_image.yml
+ roles:
+ - role: os_update_latest
+ post_tasks:
+ - name: Disable all repos on RHEL
+ command: subscription-manager repos --disable="*"
+ when: using_rhel_subscriptions
+ - name: Enable repos for packages on RHEL
+ command: subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms"
+ when: using_rhel_subscriptions
+ - name: Install common image prerequisites
+ package: name={{ item }} state=latest
+ with_items:
+ # required by Ansible
+ - PyYAML
+ - google-compute-engine
+ - google-compute-engine-init
+ - google-config
+ - wget
+ - git
+ - net-tools
+ - bind-utils
+ - iptables-services
+ - bridge-utils
+ - bash-completion
+ - name: Clean yum metadata
+ command: yum clean all
+ args:
+ warn: no
+ when: ansible_os_family == "RedHat"
+
+- name: Commit image
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
+ - name: Save the new image
+ command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_base_image_name | default(openshift_gcp_base_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_base_image }}"
+ - name: Remove the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
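
Judging from the module parameters used throughout this play, these are the variables it consumes. A hedged vars sketch, all values placeholders:

    openshift_gcp_project: my-project              # GCP project id
    openshift_gcp_zone: us-central1-a
    openshift_gcp_prefix: mycluster-               # prefixed to instance and disk names
    openshift_gcp_root_image: rhel-7               # required: source OS image or family
    openshift_gcp_base_image: mycluster-base       # image family the result is saved under
    openshift_gcp_iam_service_account_keyfile: /path/to/service-account.json
    # optional: openshift_gcp_base_image_name overrides the generated image name
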
diff --git a/playbooks/gcp/openshift-cluster/build_image.yml b/playbooks/gcp/openshift-cluster/build_image.yml
new file mode 100644
index 000000000..787de8ebc
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_image.yml
@@ -0,0 +1,106 @@
+---
+- name: Verify prerequisites for image build
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Require openshift_gcp_base_image
+ fail:
+ msg: "A base image name or family is required for image building. Please ensure `openshift_gcp_base_image` is defined."
+ when: openshift_gcp_base_image is undefined
+
+- name: Launch image build instance
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Set facts
+ set_fact:
+ openshift_node_bootstrap: True
+ openshift_master_unsupported_embedded_etcd: True
+
+ - name: Create the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ disk_type: pd-ssd
+ image: "{{ openshift_gcp_base_image }}"
+ size_gb: 10
+ state: present
+
+ - name: Launch the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ machine_type: n1-standard-1
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: present
+ tags:
+ - build-image-instance
+ disk_auto_delete: false
+ disks:
+ - "{{ openshift_gcp_prefix }}build-image-instance"
+ register: gce
+
+ - name: add host to nodes
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: nodes
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for instance to respond to SSH
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 120
+ with_items: "{{ gce.instance_data }}"
+
+- hosts: nodes
+ tasks:
+ - name: Set facts
+ set_fact:
+ openshift_node_bootstrap: True
+
+# This is the part that installs all of the software and configs for the instance
+# to become a node.
+- import_playbook: ../../openshift-node/private/image_prep.yml
+
+# Add additional GCP specific behavior
+- hosts: nodes
+ tasks:
+ - include_role:
+ name: openshift_gcp
+ tasks_from: node_cloud_config.yml
+ - include_role:
+ name: openshift_gcp
+ tasks_from: frequent_log_rotation.yml
+
+- name: Commit image
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
+ - name: Save the new image
+ command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_image_name | default(openshift_gcp_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_image }}"
+ - name: Remove the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
diff --git a/playbooks/gcp/openshift-cluster/deprovision.yml b/playbooks/gcp/openshift-cluster/deprovision.yml
new file mode 100644
index 000000000..589fddd2f
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/deprovision.yml
@@ -0,0 +1,10 @@
+# This playbook terminates a running cluster
+---
+- name: Terminate running cluster and remove all supporting resources in GCE
+ hosts: localhost
+ connection: local
+ tasks:
+ - include_role:
+ name: openshift_gcp
+ vars:
+ state: absent
diff --git a/playbooks/gcp/openshift-cluster/install.yml b/playbooks/gcp/openshift-cluster/install.yml
new file mode 100644
index 000000000..fb35b4348
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install.yml
@@ -0,0 +1,33 @@
+# This playbook installs onto a provisioned cluster
+---
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: place all scale groups into Ansible groups
+ include_role:
+ name: openshift_gcp
+ tasks_from: setup_scale_group_facts.yml
+
+- name: run the init
+ import_playbook: ../../init/main.yml
+
+- name: configure the control plane
+ import_playbook: ../../common/private/control_plane.yml
+
+- name: ensure the masters are configured as nodes
+ import_playbook: ../../openshift-node/private/config.yml
+
+- name: run the GCP specific post steps
+ import_playbook: install_gcp.yml
+
+- name: install components
+ import_playbook: ../../common/private/components.yml
+
+- hosts: primary_master
+ gather_facts: no
+ tasks:
+ - name: Retrieve cluster configuration
+ fetch:
+ src: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+ dest: "/tmp/"
+ flat: yes
diff --git a/playbooks/gcp/openshift-cluster/install_gcp.yml b/playbooks/gcp/openshift-cluster/install_gcp.yml
new file mode 100644
index 000000000..09db78971
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install_gcp.yml
@@ -0,0 +1,21 @@
+---
+- hosts: masters
+ gather_facts: no
+ tasks:
+ - name: create master health check service
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_master_healthcheck.yml
+ - name: configure node bootstrapping
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_master_bootstrap.yml
+ when:
+ - openshift_master_bootstrap_enabled | default(False)
+ - name: configure node bootstrap autoapprover
+ include_role:
+ name: openshift_bootstrap_autoapprover
+ tasks_from: main
+ when:
+ - openshift_master_bootstrap_enabled | default(False)
+ - openshift_master_bootstrap_auto_approve | default(False) | bool
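
Per the when: clauses above, both bootstrap-related roles are inert unless enabled in inventory. A hedged sketch of the toggles:

    openshift_master_bootstrap_enabled: true        # default: false
    openshift_master_bootstrap_auto_approve: true   # default: false
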
diff --git a/playbooks/gcp/openshift-cluster/inventory.yml b/playbooks/gcp/openshift-cluster/inventory.yml
new file mode 100644
index 000000000..96de6d6db
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/inventory.yml
@@ -0,0 +1,10 @@
+---
+- name: Set up the connection variables for retrieving inventory from GCE
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: materialize the inventory
+ include_role:
+ name: openshift_gcp
+ tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/launch.yml b/playbooks/gcp/openshift-cluster/launch.yml
new file mode 100644
index 000000000..02f00408a
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/launch.yml
@@ -0,0 +1,12 @@
+# This playbook launches a new cluster or converges it if already launched
+---
+- import_playbook: build_image.yml
+ when: openshift_gcp_build_image | default(False) | bool
+
+- import_playbook: provision.yml
+
+- hosts: localhost
+ tasks:
+ - meta: refresh_inventory
+
+- import_playbook: install.yml
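
The image build leg above is opt-in. A hedged inventory sketch enabling it:

    openshift_gcp_build_image: true   # default: false
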
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml
index b6edf9961..293a195c9 100644
--- a/playbooks/gcp/provision.yml
+++ b/playbooks/gcp/openshift-cluster/provision.yml
@@ -3,11 +3,10 @@
hosts: localhost
connection: local
gather_facts: no
+ roles:
+ - openshift_gcp
tasks:
-
- - name: provision a GCP cluster in the specified project
+ - name: recalculate the dynamic inventory
import_role:
name: openshift_gcp
-
-- name: run the cluster deploy
- import_playbook: ../deploy_cluster.yml
+ tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/publish_image.yml b/playbooks/gcp/openshift-cluster/publish_image.yml
new file mode 100644
index 000000000..76fd49e9c
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/publish_image.yml
@@ -0,0 +1,9 @@
+---
+- name: Publish the most recent image
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - import_role:
+ name: openshift_gcp
+ tasks_from: publish_image.yml
diff --git a/playbooks/gcp/openshift-cluster/roles b/playbooks/gcp/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index e1052fb6c..0a730a88a 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -16,6 +16,7 @@
- iproute
- "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+ - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else omit }}"
- yum-utils
register: result
until: result is succeeded
diff --git a/playbooks/init/basic_facts.yml b/playbooks/init/basic_facts.yml
index 06a4e7291..a9bf06693 100644
--- a/playbooks/init/basic_facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -67,3 +67,11 @@
first_master_client_binary: "{{ openshift_client_binary }}"
#Some roles may require this to be set for first master
openshift_client_binary: "{{ openshift_client_binary }}"
+
+- name: Disable web console if required
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ openshift_web_console_install: False
+ when: openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )
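
Either branch of the when: above disables the console. A hedged inventory sketch of the two triggers (one is enough):

    openshift_deployment_subtype: registry
    # or, equivalently:
    osm_disabled_features:
    - WebConsole
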
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index c4cd226c9..e8bf1892c 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -45,9 +45,13 @@
- name: Evaluate groups - Fail if no etcd hosts group is defined
fail:
msg: >
- Running etcd as an embedded service is no longer supported.
+ Running etcd as an embedded service is no longer supported. If this is a
+ new install please define an 'etcd' group with either one, three or five
+ hosts. These hosts may be the same hosts as your masters. If this is an
+ upgrade please see https://docs.openshift.com/container-platform/latest/install_config/upgrading/migrating_embedded_etcd.html
+ for documentation on how to migrate from embedded to external etcd.
when:
- - g_etcd_hosts | default([]) | length not in [3,1]
+ - g_etcd_hosts | default([]) | length not in [5,3,1]
- not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
index 86e0b2416..b49f7dd08 100644
--- a/playbooks/init/validate_hostnames.yml
+++ b/playbooks/init/validate_hostnames.yml
@@ -25,7 +25,7 @@
when:
- lookupip.stdout != '127.0.0.1'
- lookupip.stdout not in ansible_all_ipv4_addresses
- - openshift_hostname_check | default(true)
+ - openshift_hostname_check | default(true) | bool
- name: Validate openshift_ip exists on node when defined
fail:
@@ -40,4 +40,4 @@
when:
- openshift_ip is defined
- openshift_ip not in ansible_all_ipv4_addresses
- - openshift_ip_check | default(true)
+ - openshift_ip_check | default(true) | bool
diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml
index 7e9ab6834..656454fe3 100644
--- a/playbooks/openshift-etcd/scaleup.yml
+++ b/playbooks/openshift-etcd/scaleup.yml
@@ -1,4 +1,51 @@
---
+- import_playbook: ../init/evaluate_groups.yml
+
+- name: Ensure there are new_etcd
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ Detected no new_etcd in inventory. Please add hosts to the
+ new_etcd host group to add etcd hosts.
+ when:
+ - g_new_etcd_hosts | default([]) | length == 0
+
+ - fail:
+ msg: >
+ Detected new_etcd host is member of new_masters or new_nodes. Please
+ run playbooks/openshift-master/scaleup.yml or
+ playbooks/openshift-node/scaleup.yml before running this play.
+ when: >
+ inventory_hostname in (groups['new_masters'] | default([]))
+ or inventory_hostname in (groups['new_nodes'] | default([]))
+
+# We only need to run this if etcd is being installed on a standalone host;
+# If etcd is part of master or node group, there's no need to
+# re-run prerequisites
+- import_playbook: ../prerequisites.yml
+ vars:
+ # We need to ensure container_runtime is only processed for containerized
+ # etcd hosts by setting l_build_container_groups_hosts and l_etcd_scale_up_hosts
+ l_build_container_groups_hosts: "oo_new_etcd_to_config"
+ l_etcd_scale_up_hosts: "oo_hosts_containerized_managed_true"
+ l_scale_up_hosts: "oo_new_etcd_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_new_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}"
+ when:
+ - inventory_hostname not in groups['oo_masters']
+ - inventory_hostname not in groups['oo_nodes_to_config']
+
+# If this etcd host is part of a master or node, we don't need to run
+# prerequisites, we can just init facts as normal.
- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+ when:
+ - inventory_hostname in groups['oo_masters']
+ - inventory_hostname in groups['oo_nodes_to_config']
- import_playbook: private/scaleup.yml
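
A hedged inventory sketch for this scaleup flow, host names invented: the host being added goes in new_etcd and, per the guard above, must not also appear in new_masters or new_nodes.

    all:
      children:
        etcd:
          hosts:
            etcd1.example.com:
            etcd2.example.com:
        new_etcd:
          hosts:
            etcd3.example.com:   # standalone host being added
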
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 59e2b515c..cc812c300 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -142,11 +142,6 @@
state: absent
changed_when: False
-- name: Setup extension file for service console UI
- template:
- src: ../templates/openshift-ansible-catalog-console.js
- dest: /etc/origin/master/openshift-ansible-catalog-console.js
-
- name: Update master config
yedit:
state: present
@@ -166,8 +161,6 @@
value: [X-Remote-Group]
- key: authConfig.requestHeader.extraHeaderPrefixes
value: [X-Remote-Extra-]
- - key: assetConfig.extensionScripts
- value: [/etc/origin/master/openshift-ansible-catalog-console.js]
- key: kubernetesMasterConfig.apiServerArguments.runtime-config
value: [apis/settings.k8s.io/v1alpha1=true]
- key: admissionConfig.pluginConfig.PodPreset.configuration.kind
@@ -178,37 +171,50 @@
value: false
register: yedit_output
-#restart master serially here
-- name: restart master api
- systemd: name={{ openshift_service_type }}-master-api state=restarted
- when:
- - yedit_output.changed
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
- command: "systemctl restart {{ openshift_service_type }}-master-controllers"
- retries: 3
- delay: 5
- register: result
- until: result.rc == 0
- when:
- - yedit_output.changed
+# Only add the catalog extension script if not 3.9. From 3.9 on, the console
+# can discover if template service broker is running.
+- when: not openshift.common.version_gte_3_9
+ block:
+ - name: Setup extension file for service console UI
+ template:
+ src: ../templates/openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+ - name: Update master config
+ yedit:
+ state: present
+ src: /etc/origin/master/master-config.yaml
+ key: assetConfig.extensionScripts
+ value: [/etc/origin/master/openshift-ansible-catalog-console.js]
+ register: yedit_asset_config_output
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
- when:
- - yedit_output.changed
+#restart master serially here
+- when: yedit_output.changed or (yedit_asset_config_output is defined and yedit_asset_config_output.changed)
+ block:
+ - name: restart master api
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
+
+ # We retry the controllers because the API may not be 100% initialized yet.
+ - name: restart master controllers
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
+
+ - name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
diff --git a/playbooks/openshift-prometheus/private/uninstall.yml b/playbooks/openshift-prometheus/private/uninstall.yml
new file mode 100644
index 000000000..2df39c2a8
--- /dev/null
+++ b/playbooks/openshift-prometheus/private/uninstall.yml
@@ -0,0 +1,8 @@
+---
+- name: Uninstall Prometheus
+ hosts: masters[0]
+ tasks:
+ - name: Run the Prometheus Uninstall Role Tasks
+ include_role:
+ name: openshift_prometheus
+ tasks_from: uninstall
diff --git a/playbooks/openshift-prometheus/uninstall.yml b/playbooks/openshift-prometheus/uninstall.yml
new file mode 100644
index 000000000..c92ade786
--- /dev/null
+++ b/playbooks/openshift-prometheus/uninstall.yml
@@ -0,0 +1,2 @@
+---
+- import_playbook: private/uninstall.yml
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index a8663f946..1287b25f3 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -43,7 +43,7 @@ openshift_hosted_registry_wait: True
# NOTE(shadower): the hostname check seems to always fail because the
# host's floating IP address doesn't match the address received from
# inside the host.
-openshift_override_hostname_check: true
+openshift_hostname_check: false
# For POCs or demo environments that are using smaller instances than
# the official recommended values for RAM and DISK, uncomment the line below.