Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml  16
-rw-r--r--  playbooks/aws/README.md  10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_s3.yml  10
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example  6
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml (renamed from playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml)  0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml  20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml  6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml  52
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml  58
-rw-r--r--  playbooks/deploy_cluster.yml  8
-rw-r--r--  playbooks/gcp/openshift-cluster/build_base_image.yml  3
-rw-r--r--  playbooks/init/base_packages.yml  2
-rw-r--r--  playbooks/init/basic_facts.yml  8
-rw-r--r--  playbooks/openshift-hosted/deploy_registry.yml  4
-rw-r--r--  playbooks/openshift-hosted/deploy_router.yml  4
-rw-r--r--  playbooks/openshift-hosted/private/openshift_default_storage_class.yml  4
-rw-r--r--  playbooks/openshift-logging/private/config.yml  1
-rw-r--r--  playbooks/openshift-master/private/additional_config.yml  1
-rw-r--r--  playbooks/openshift-master/private/config.yml  3
-rw-r--r--  playbooks/openshift-master/private/restart.yml  9
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml  1
-rw-r--r--  playbooks/openshift-master/private/tasks/wire_aggregator.yml  86
-rw-r--r--  playbooks/openshift-metrics/private/config.yml  1
-rw-r--r--  playbooks/openshift-node/private/restart.yml  1
-rw-r--r--  playbooks/openshift-node/redeploy-certificates.yml  2
-rw-r--r--  playbooks/openshift-prometheus/private/uninstall.yml  8
-rw-r--r--  playbooks/openshift-prometheus/uninstall.yml  2
-rwxr-xr-x  playbooks/openstack/inventory.py  48
31 files changed, 226 insertions, 163 deletions
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
deleted file mode 100644
index faeb332ad..000000000
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: masters[0]
- roles:
- - role: openshift_logging
- openshift_hosted_logging_cleanup: no
-
-- name: Update master-config for publicLoggingURL
- hosts: masters:!masters[0]
- pre_tasks:
- - set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
- tasks:
- - import_role:
- name: openshift_logging
- tasks_from: update_master_config
- when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index bdc98d1e0..cf811ca84 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -201,9 +201,7 @@ There are more enhancements that are arriving for provisioning. These will incl
## Uninstall / Deprovisioning
-At this time, only deprovisioning of the output of the prerequisites step is provided. You can/must manually remove things like ELBs and scale groups before attempting to undo the work by the preprovisiong step.
-
-To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning.
+To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You must first remove any other objects (i.e. ELBs, instances, etc.) before attempting. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning.
```
ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml
@@ -211,4 +209,10 @@ ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars fi
This should result in removal of the security groups and VPC that were created.
+Cleaning up the S3 bucket contents can be accomplished with:
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_s3.yml
+```
+
NOTE: If you want to also remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account** so we don't remove these by default) then you should add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file.
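For example, enabling shared ssh key removal is a one-line addition to that file (illustrative snippet):

```
# provisioning_vars.yml
openshift_aws_enable_uninstall_shared_objects: True
```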
diff --git a/playbooks/aws/openshift-cluster/uninstall_s3.yml b/playbooks/aws/openshift-cluster/uninstall_s3.yml
new file mode 100644
index 000000000..448b47aee
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_s3.yml
@@ -0,0 +1,10 @@
+---
+- name: Empty/delete s3 bucket
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: empty/delete s3 bucket
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_s3.yml
+ when: openshift_aws_create_s3 | default(true) | bool
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index a1a8a5b08..78484fdbd 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -21,6 +21,12 @@ openshift_release: # v3.7
# This will be dependent on the version provided by the yum repository
openshift_pkg_version: # -3.7.0
+# OpenShift api port
+# Fulfills a chicken/egg scenario with how Ansible treats host inventory files
+# and extra_vars. This is used for SecurityGroups and ELB listeners, as well as
+# an override for the installer inventory openshift_master_api_port key.
+# openshift_master_api_port: 8443
+
# specify a clusterid
# This value is also used as the default value for many other components.
#openshift_aws_clusterid: default
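If the port does need to change, the override is a matter of uncommenting the key shown above; an illustrative sketch:

```
# provisioning_vars.yml: serve the API on 443 instead of the default 8443
openshift_master_api_port: 443
```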
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 869e185af..c8f397186 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -12,3 +12,5 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+
+- import_playbook: ../../../../openshift-master/private/restart.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
index 23a3fcbb5..23a3fcbb5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index f790fd98d..f44ab3580 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -6,7 +6,9 @@
hosts: oo_first_master
roles:
- role: openshift_web_console
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift_upgrade_target is version_compare('3.9','>=')
- name: Upgrade default router and default registry
hosts: oo_first_master
@@ -111,6 +113,22 @@
registry_url: "{{ openshift.master.registry_url }}"
openshift_hosted_templates_import_command: replace
+ post_tasks:
+ # we need to migrate customers to the new pattern of pushing to the registry via dns
+ # Step 1: verify the certificates have the docker registry service name
+ - shell: >
+ echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
+ register: cert_output
+ # grep exits 1 when the SAN is absent; only other codes are real failures
+ failed_when: cert_output.rc not in [0, 1]
+
+ # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs
+ - name: set a fact to include the registry certs playbook if needed
+ set_fact:
+ openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}"
+
+# Run the redeploy certs based upon the certificates
+- when: hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry
+ import_playbook: ../../../openshift-hosted/redeploy-registry-certificates.yml
+
# Check for warnings to be printed at the end of the upgrade:
- name: Clean up and display warnings
hosts: oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index 2b27f8dd0..44af37b2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -5,8 +5,6 @@
# Pre-upgrade
- import_playbook: ../initialize_nodes_to_upgrade.yml
-- import_playbook: verify_cluster.yml
-
- name: Update repos on upgrade hosts
hosts: "{{ l_upgrade_repo_hosts }}"
roles:
@@ -53,6 +51,8 @@
# l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
# l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
+- import_playbook: verify_cluster.yml
+
# If we're only upgrading nodes, we need to ensure masters are already upgraded
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
@@ -60,7 +60,7 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when:
- l_upgrade_nodes_only | default(False) | bool
- - openshift.common.version != openshift_version
+ - not openshift.common.version | match(openshift_version)
# If we're only upgrading nodes, skip this.
- import_playbook: ../../../../openshift-master/private/validate_restart.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 5ee8a9d78..463a05688 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -17,6 +17,7 @@
valid version for a {{ openshift_upgrade_target }} upgrade
when:
- openshift_pkg_version is defined
+ - openshift_pkg_version != ""
- openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
- fail:
@@ -25,6 +26,7 @@
valid version for a {{ openshift_upgrade_target }} upgrade
when:
- openshift_image_tag is defined
+ - openshift_image_tag != ""
- openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
- set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index a10fd4bee..9b5ba3482 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -56,7 +56,6 @@
register: l_pb_upgrade_control_plane_pre_upgrade_storage
when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
failed_when:
- - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
- openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
@@ -72,8 +71,6 @@
# support for optional hooks to be defined.
- name: Upgrade master
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
roles:
- openshift_facts
@@ -96,6 +93,12 @@
- include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
+ - name: Disable master controller
+ service:
+ name: "{{ openshift_service_type }}-master-controllers"
+ enabled: false
+ when: openshift.common.rolling_restart_mode == 'system'
+
- include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
@@ -118,7 +121,6 @@
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- openshift_version is version_compare('3.7','<')
failed_when:
- - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
- openshift_upgrade_post_storage_migration_fatal | default(false) | bool
run_once: true
@@ -254,7 +256,6 @@
register: l_pb_upgrade_control_plane_post_upgrade_storage
when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
failed_when:
- - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
- openshift_upgrade_post_storage_migration_fatal | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index bf6e8605e..ec1da6d39 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,54 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- import_playbook: ../init.yml
+- import_playbook: upgrade_control_plane.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.9'
- openshift_upgrade_min: '3.7'
- openshift_release: '3.9'
-
-- import_playbook: ../pre/config.yml
- vars:
- l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
- l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
- l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
- l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
- openshift_protect_installed_version: False
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - set_fact:
- pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
-
-# All controllers must be stopped at the same time then restarted
-- name: Cycle all controller services to force new leader election mode
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_facts
- tasks:
- - name: Stop {{ openshift_service_type }}-master-controllers
- systemd:
- name: "{{ openshift_service_type }}-master-controllers"
- state: stopped
- - name: Start {{ openshift_service_type }}-master-controllers
- systemd:
- name: "{{ openshift_service_type }}-master-controllers"
- state: started
-
-- import_playbook: ../upgrade_nodes.yml
-
-- import_playbook: ../post_control_plane.yml
+- import_playbook: upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 1dcc38def..8792295c6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -25,10 +25,19 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
openshift_release: '3.8'
- _requested_pkg_version: "{{openshift_pkg_version if openshift_pkg_version is defined else omit }}"
- _requested_image_tag: "{{openshift_image_tag if openshift_image_tag is defined else omit }}"
+ _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+ openshift_pkg_version: ''
+ _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+ l_double_upgrade_cp: True
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+ - name: set l_force_image_tag_to_version = True
+ set_fact:
+ # Need to set this during 3.8 upgrade to ensure image_tag is set correctly
+ # to match 3.8 version
+ l_force_image_tag_to_version: True
+ when: _requested_image_tag is defined
+
- import_playbook: ../pre/config.yml
# These vars are meant to exclude oo_nodes from plays that would otherwise include
# them by default.
@@ -53,9 +62,8 @@
# Pre-upgrade completed
-- import_playbook: ../upgrade_control_plane.yml
- vars:
- openshift_release: '3.8'
+- name: Intermediate 3.8 Upgrade
+ import_playbook: ../upgrade_control_plane.yml
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
## 3.8 upgrade complete we should now be able to upgrade to 3.9
@@ -68,8 +76,21 @@
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.8'
openshift_release: '3.9'
- openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
- openshift_image_tag: "{{ _requested_image_tag | default('v3.9') }}"
+ openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"
+ # Set the user's specified image_tag for 3.9 upgrade if it was provided.
+ - set_fact:
+ openshift_image_tag: "{{ _requested_image_tag }}"
+ l_force_image_tag_to_version: False
+ when: _requested_image_tag is defined
+ # If the user didn't specify an image_tag, we need to force update image_tag
+ # because it will have already been set during 3.8. If we aren't running
+ # a double upgrade, then we can preserve image_tag because it will still
+ # be the user provided value.
+ - set_fact:
+ l_force_image_tag_to_version: True
+ when:
+ - l_double_upgrade_cp is defined and l_double_upgrade_cp
+ - _requested_image_tag is not defined
- import_playbook: ../pre/config.yml
# These vars are meant to exclude oo_nodes from plays that would otherwise include
@@ -84,6 +105,7 @@
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
openshift_protect_installed_version: False
+ openshift_version_reinit: True
- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
@@ -92,8 +114,6 @@
pre_upgrade_complete: True
- import_playbook: ../upgrade_control_plane.yml
- vars:
- openshift_release: '3.9'
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
@@ -102,13 +122,21 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift_service_type }}-master-controllers
- systemd:
+ - name: Restart master controllers to force new leader election mode
+ service:
name: "{{ openshift_service_type }}-master-controllers"
- state: stopped
- - name: Start {{ openshift_service_type }}-master-controllers
- systemd:
+ state: restarted
+ when: openshift.common.rolling_restart_mode == 'service'
+ - name: Re-enable master controllers to force new leader election mode
+ service:
name: "{{ openshift_service_type }}-master-controllers"
- state: started
+ enabled: true
+ when: openshift.common.rolling_restart_mode == 'system'
- import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+ tasks:
+ - import_role:
+ name: openshift_web_console
+ tasks_from: remove_old_asset_config
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 361553ee4..c8e30ddbc 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -6,11 +6,3 @@
- import_playbook: openshift-node/private/config.yml
- import_playbook: common/private/components.yml
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/gcp/openshift-cluster/build_base_image.yml b/playbooks/gcp/openshift-cluster/build_base_image.yml
index 75d0ddf9d..8e9b0024a 100644
--- a/playbooks/gcp/openshift-cluster/build_base_image.yml
+++ b/playbooks/gcp/openshift-cluster/build_base_image.yml
@@ -90,6 +90,8 @@
repo_gpgcheck: no
state: present
when: ansible_os_family == "RedHat"
+ - name: Accept GPG keys for the repos
+ command: yum -q makecache -y --disablerepo='*' --enablerepo='google-cloud,jdetiber-qemu-user-static'
- name: Install qemu-user-static
package:
name: qemu-user-static
@@ -121,7 +123,6 @@
with_items:
# required by Ansible
- PyYAML
- - docker
- google-compute-engine
- google-compute-engine-init
- google-config
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index e1052fb6c..81f4dd183 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -16,7 +16,9 @@
- iproute
- "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+ - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"
- yum-utils
+ when: item != ''
register: result
until: result is succeeded
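The hunk above relies on a small trick: a distro-conditional list item collapses to the empty string, and the `when: item != ''` guard skips it. A standalone sketch of the same pattern (hypothetical play, not part of this diff):

```
---
- hosts: localhost
  gather_facts: yes
  tasks:
    - name: Install base packages, skipping empty placeholders
      package:
        name: "{{ item }}"
        state: present
      with_items:
        - iproute
        - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"
      when: item != ''
```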
diff --git a/playbooks/init/basic_facts.yml b/playbooks/init/basic_facts.yml
index 06a4e7291..a9bf06693 100644
--- a/playbooks/init/basic_facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -67,3 +67,11 @@
first_master_client_binary: "{{ openshift_client_binary }}"
#Some roles may require this to be set for first master
openshift_client_binary: "{{ openshift_client_binary }}"
+
+- name: Disable web console if required
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ openshift_web_console_install: False
+ when: openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )
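For reference, the second branch of that condition can be exercised from the inventory; a hedged snippet (the value is illustrative):

```
# inventory group_vars (illustrative)
osm_disabled_features: ['WebConsole']
```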
diff --git a/playbooks/openshift-hosted/deploy_registry.yml b/playbooks/openshift-hosted/deploy_registry.yml
new file mode 100644
index 000000000..2453329dd
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_registry.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_registry.yml
diff --git a/playbooks/openshift-hosted/deploy_router.yml b/playbooks/openshift-hosted/deploy_router.yml
new file mode 100644
index 000000000..e832eeeea
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_router.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_router.yml
diff --git a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
index 62fe0dd60..c59ebcead 100644
--- a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
+++ b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
@@ -3,4 +3,6 @@
hosts: oo_first_master
roles:
- role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
+ when:
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind in ['aws','gce','openstack','vsphere']
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index d6b26647c..07aa8bfde 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -24,6 +24,7 @@
- import_role:
name: openshift_logging
tasks_from: update_master_config
+ when: not openshift.common.version_gte_3_9
- name: Logging Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index 85be0e600..ca514ed26 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -16,7 +16,6 @@
vars:
cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
etcd_urls: "{{ openshift.master.etcd_urls }}"
- openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
roles:
- role: openshift_project_request_template
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 153ea9993..d2fc2eed8 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -78,7 +78,6 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Inspect state of first master config settings
@@ -166,7 +165,6 @@
hosts: oo_masters_to_config
any_errors_fatal: true
vars:
- openshift_master_ha: "{{ openshift.master.ha }}"
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
@@ -186,6 +184,7 @@
- role: openshift_buildoverrides
- role: nickhammond.logrotate
- role: openshift_master
+ openshift_master_ha: "{{ (groups.oo_masters | length > 1) | bool }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml
index 5cb284935..17d90533c 100644
--- a/playbooks/openshift-master/private/restart.yml
+++ b/playbooks/openshift-master/private/restart.yml
@@ -3,16 +3,13 @@
- name: Restart masters
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
- handlers:
- - import_tasks: ../../../roles/openshift_master/handlers/main.yml
roles:
- openshift_facts
post_tasks:
- include_tasks: tasks/restart_hosts.yml
when: openshift_rolling_restart_mode | default('services') == 'system'
-
- - include_tasks: tasks/restart_services.yml
+ - import_role:
+ name: openshift_master
+ tasks_from: restart.yml
when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 007b23ea3..20ebf70d3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -8,7 +8,6 @@
- openshift_facts:
role: master
local_facts:
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Update master count
modify_yaml:
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 59e2b515c..cc812c300 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -142,11 +142,6 @@
state: absent
changed_when: False
-- name: Setup extension file for service console UI
- template:
- src: ../templates/openshift-ansible-catalog-console.js
- dest: /etc/origin/master/openshift-ansible-catalog-console.js
-
- name: Update master config
yedit:
state: present
@@ -166,8 +161,6 @@
value: [X-Remote-Group]
- key: authConfig.requestHeader.extraHeaderPrefixes
value: [X-Remote-Extra-]
- - key: assetConfig.extensionScripts
- value: [/etc/origin/master/openshift-ansible-catalog-console.js]
- key: kubernetesMasterConfig.apiServerArguments.runtime-config
value: [apis/settings.k8s.io/v1alpha1=true]
- key: admissionConfig.pluginConfig.PodPreset.configuration.kind
@@ -178,37 +171,50 @@
value: false
register: yedit_output
-#restart master serially here
-- name: restart master api
- systemd: name={{ openshift_service_type }}-master-api state=restarted
- when:
- - yedit_output.changed
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
- command: "systemctl restart {{ openshift_service_type }}-master-controllers"
- retries: 3
- delay: 5
- register: result
- until: result.rc == 0
- when:
- - yedit_output.changed
+# Only add the catalog extension script if not 3.9. From 3.9 on, the console
+# can discover if template service broker is running.
+- when: not openshift.common.version_gte_3_9
+ block:
+ - name: Setup extension file for service console UI
+ template:
+ src: ../templates/openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+ - name: Update master config
+ yedit:
+ state: present
+ src: /etc/origin/master/master-config.yaml
+ key: assetConfig.extensionScripts
+ value: [/etc/origin/master/openshift-ansible-catalog-console.js]
+ register: yedit_asset_config_output
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
- when:
- - yedit_output.changed
+#restart master serially here
+- when: yedit_output.changed or (yedit_asset_config_output is defined and yedit_asset_config_output.changed)
+ block:
+ - name: restart master api
+ systemd: name={{ openshift_service_type }}-master-api state=restarted
+
+ # We retry the controllers because the API may not be 100% initialized yet.
+ - name: restart master controllers
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
+
+ - name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 1e237e3f0..889ea77b1 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -25,6 +25,7 @@
import_role:
name: openshift_metrics
tasks_from: update_master_config.yaml
+ when: not openshift.common.version_gte_3_9
- name: Metrics Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 7249ced70..7371bd7ac 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -16,6 +16,7 @@
until: not (l_docker_restart_docker_in_node_result is failed)
retries: 3
delay: 30
+ when: openshift_node_restart_docker_required | default(True)
- name: Restart containerized services
service:
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
index 8b7272485..cdf816fbf 100644
--- a/playbooks/openshift-node/redeploy-certificates.yml
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -4,3 +4,5 @@
- import_playbook: private/redeploy-certificates.yml
- import_playbook: private/restart.yml
+ vars:
+ openshift_node_restart_docker_required: False
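The two hunks above work as a pair: the `when:` guard added to private/restart.yml defaults to restarting docker, and this playbook opts out by passing the flag through `import_playbook`. A minimal sketch of the pattern (hypothetical file contents, mirroring the diff):

```
# redeploy-certificates.yml: pass the flag down to the imported playbook
- import_playbook: private/restart.yml
  vars:
    openshift_node_restart_docker_required: False

# private/restart.yml: the docker restart runs only when required
- hosts: oo_nodes_to_config
  tasks:
    - name: Restart docker
      service:
        name: docker
        state: restarted
      when: openshift_node_restart_docker_required | default(True)
```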
diff --git a/playbooks/openshift-prometheus/private/uninstall.yml b/playbooks/openshift-prometheus/private/uninstall.yml
new file mode 100644
index 000000000..2df39c2a8
--- /dev/null
+++ b/playbooks/openshift-prometheus/private/uninstall.yml
@@ -0,0 +1,8 @@
+---
+- name: Uninstall Prometheus
+ hosts: masters[0]
+ tasks:
+ - name: Run the Prometheus Uninstall Role Tasks
+ include_role:
+ name: openshift_prometheus
+ tasks_from: uninstall
diff --git a/playbooks/openshift-prometheus/uninstall.yml b/playbooks/openshift-prometheus/uninstall.yml
new file mode 100644
index 000000000..c92ade786
--- /dev/null
+++ b/playbooks/openshift-prometheus/uninstall.yml
@@ -0,0 +1,2 @@
+---
+- import_playbook: private/uninstall.yml
diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py
index 76e658eb7..d5a8c3e24 100755
--- a/playbooks/openstack/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -15,18 +15,10 @@ import json
import shade
-def build_inventory():
- '''Build the dynamic inventory.'''
- cloud = shade.openstack_cloud()
-
+def base_openshift_inventory(cluster_hosts):
+ '''Set the base openshift inventory.'''
inventory = {}
- # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
- # environment variable.
- cluster_hosts = [
- server for server in cloud.list_servers()
- if 'metadata' in server and 'clusterid' in server.metadata]
-
masters = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'master']
@@ -67,6 +59,34 @@ def build_inventory():
inventory['dns'] = {'hosts': dns}
inventory['lb'] = {'hosts': load_balancers}
+ return inventory
+
+
+def get_docker_storage_mountpoints(volumes):
+ '''Check volumes to see if they're being used for docker storage'''
+ docker_storage_mountpoints = {}
+ for volume in volumes:
+ if volume.metadata.get('purpose') == "openshift_docker_storage":
+ for attachment in volume.attachments:
+ if attachment.server_id in docker_storage_mountpoints:
+ docker_storage_mountpoints[attachment.server_id].append(attachment.device)
+ else:
+ docker_storage_mountpoints[attachment.server_id] = [attachment.device]
+ return docker_storage_mountpoints
+
+
+def build_inventory():
+ '''Build the dynamic inventory.'''
+ cloud = shade.openstack_cloud()
+
+ # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+ # environment variable.
+ cluster_hosts = [
+ server for server in cloud.list_servers()
+ if 'metadata' in server and 'clusterid' in server.metadata]
+
+ inventory = base_openshift_inventory(cluster_hosts)
+
for server in cluster_hosts:
if 'group' in server.metadata:
group = server.metadata.group
@@ -76,6 +96,9 @@ def build_inventory():
inventory['_meta'] = {'hostvars': {}}
+ # cinder volumes used for docker storage
+ docker_storage_mountpoints = get_docker_storage_mountpoints(cloud.list_volumes())
+
for server in cluster_hosts:
ssh_ip_address = server.public_v4 or server.private_v4
hostvars = {
@@ -111,6 +134,11 @@ def build_inventory():
if node_labels:
hostvars['openshift_node_labels'] = node_labels
+ # check for attached docker storage volumes
+ if 'os-extended-volumes:volumes_attached' in server:
+ if server.id in docker_storage_mountpoints:
+ hostvars['docker_storage_mountpoints'] = ' '.join(docker_storage_mountpoints[server.id])
+
inventory['_meta']['hostvars'][server.name] = hostvars
return inventory
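The new get_docker_storage_mountpoints() helper groups attached device paths by server id. A quick offline sanity check, assuming the function is imported from the patched inventory.py (FakeVolume and FakeAttachment are hypothetical stand-ins for the shade SDK objects, not real shade types):

```
class FakeAttachment(object):
    def __init__(self, server_id, device):
        self.server_id = server_id
        self.device = device

class FakeVolume(object):
    def __init__(self, purpose, attachments):
        self.metadata = {'purpose': purpose}
        self.attachments = attachments

volumes = [
    # counted: purpose matches openshift_docker_storage
    FakeVolume('openshift_docker_storage',
               [FakeAttachment('server-1', '/dev/vdb'),
                FakeAttachment('server-1', '/dev/vdc')]),
    # ignored: different purpose
    FakeVolume('database_storage',
               [FakeAttachment('server-2', '/dev/vdb')]),
]

print(get_docker_storage_mountpoints(volumes))
# expected: {'server-1': ['/dev/vdb', '/dev/vdc']}
```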