Diffstat (limited to 'playbooks/common')
13 files changed, 161 insertions, 127 deletions
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index ba783638d..a9a35b028 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -6,6 +6,7 @@
     g_new_node_hosts: []
 
 - import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/base_packages.yml
 - import_playbook: ../../../init/cluster_facts.yml
 
 - name: Ensure firewall is not switched during upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index f44ab3580..86cde2844 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -114,20 +114,27 @@
     openshift_hosted_templates_import_command: replace
   post_tasks:
-  # we need to migrate customers to the new pattern of pushing to the registry via dns
-  # Step 1: verify the certificates have the docker registry service name
-  - shell: >
-      echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
-    register: cert_output
-
-  # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs
-  - name: set a fact to include the registry certs playbook if needed
-    set_fact:
-      openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}"
-
-# Run the redeploy certs based upon the certificates
-- when: hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry
-  import_playbook: ../../../openshift-hosted/redeploy-registry-certificates.yml
+  # Do not perform these tasks when the registry is insecure. The default registry is insecure in openshift_hosted/defaults/main.yml
+  - when: not (openshift_docker_hosted_registry_insecure | default(True))
+    block:
+    # we need to migrate customers to the new pattern of pushing to the registry via dns
+    # Step 1: verify the certificates have the docker registry service name
+    - name: shell command to determine if the docker-registry.default.svc is found in the registry certificate
+      shell: >
+        echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
+      register: cert_output
+      changed_when: false
+      failed_when:
+      - cert_output.rc not in [0, 1]
+
+    # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs
+    - name: set a fact to include the registry certs playbook if needed
+      set_fact:
+        openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}"
+
+# Run the redeploy certs based upon the certificates. Defaults to False for insecure registries
+- when: (hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry | default(False)) | bool
+  import_playbook: ../../../openshift-hosted/private/redeploy-registry-certificates.yml
 
 # Check for warnings to be printed at the end of the upgrade:
 - name: Clean up and display warnings
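Note: the certificate probe above rides on grep's exit codes. grep -Pq exits 0 on a match and 1 on a clean miss (an openssl failure upstream leaves grep with empty input, which is also rc 1), so failed_when: cert_output.rc not in [0, 1] only fails the task on genuine errors such as rc 2. A standalone sketch of the same pattern; the play layout, variable names, and debug task here are illustrative, not part of the commit:

- name: Probe a TLS endpoint for an expected SAN entry (sketch)
  hosts: localhost
  gather_facts: false
  vars:
    probe_host: docker-registry.default.svc    # illustrative service DNS name
    probe_port: 5000
  tasks:
  - name: Check the serving certificate for the DNS SAN
    shell: >
      echo -n | openssl s_client -showcerts
      -servername {{ probe_host }} -connect {{ probe_host }}:{{ probe_port }}
      | openssl x509 -text
      | grep -A1 'X509v3 Subject Alternative Name:'
      | grep -Pq 'DNS:{{ probe_host | regex_escape }}(,|$)'
    register: san_check
    changed_when: false
    failed_when: san_check.rc not in [0, 1]    # rc 1 means "SAN absent", not task failure
  - debug:
      msg: "SAN present: {{ san_check.rc == 0 }}"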
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index edc541ef9..44af37b2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -5,8 +5,6 @@
 # Pre-upgrade
 - import_playbook: ../initialize_nodes_to_upgrade.yml
 
-- import_playbook: verify_cluster.yml
-
 - name: Update repos on upgrade hosts
   hosts: "{{ l_upgrade_repo_hosts }}"
   roles:
@@ -53,6 +51,8 @@
     # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
     # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
 
+- import_playbook: verify_cluster.yml
+
 # If we're only upgrading nodes, we need to ensure masters are already upgraded
 - name: Verify masters are already upgraded
   hosts: oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 5ee8a9d78..4902b9ecd 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -17,6 +17,7 @@
         valid version for a {{ openshift_upgrade_target }} upgrade
     when:
     - openshift_pkg_version is defined
+    - openshift_pkg_version != ""
    - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
 
   - fail:
@@ -25,6 +26,7 @@
         valid version for a {{ openshift_upgrade_target }} upgrade
     when:
     - openshift_image_tag is defined
+    - openshift_image_tag != ""
     - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
 
   - set_fact:
@@ -92,25 +94,3 @@
       state: started
       enabled: yes
     with_items: "{{ master_services }}"
-
-# Until openshift-ansible is determining which host is the CA host we
-# must (unfortunately) ensure that the first host in the etcd group is
-# the etcd CA host.
-# https://bugzilla.redhat.com/show_bug.cgi?id=1469358
-- name: Verify we can proceed on first etcd
-  hosts: oo_first_etcd
-  gather_facts: no
-  tasks:
-  - name: Ensure CA exists on first etcd
-    stat:
-      path: /etc/etcd/generated_certs
-    register: __etcd_ca_stat
-
-  - fail:
-      msg: >
-        In order to correct an etcd certificate signing problem
-        upgrading may require re-generating etcd certificates. Please
-        ensure that the /etc/etcd/generated_certs directory exists on
-        the first host defined in your [etcd] group.
-    when:
-    - not __etcd_ca_stat.stat.exists | bool
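Note: the two new empty-string guards exist because these plays can now be entered with openshift_pkg_version or openshift_image_tag set but empty (the v3_9 control-plane playbook at the bottom of this diff seeds openshift_pkg_version: '' for the intermediate 3.8 hop). On an empty string, split('-',1) yields a single-element list, so the .1 lookup in the next condition is undefined and the whole when: errors out instead of skipping; the AND-ed condition list short-circuits before reaching it. A minimal reproduction, assuming only stock Jinja behavior:

- hosts: localhost
  gather_facts: false
  vars:
    openshift_pkg_version: ''          # as seeded by the 3.9 double-upgrade path
    openshift_upgrade_target: '3.10'
  tasks:
  - fail:
      msg: openshift_pkg_version is not valid for a {{ openshift_upgrade_target }} upgrade
    when:
    - openshift_pkg_version is defined
    - openshift_pkg_version != ""      # without this, .split('-',1).1 errors on ''
    - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target, '<')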
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c27118f6f..3c0b72832 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -3,29 +3,6 @@
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
 
-# Prior to 3.6, openshift-ansible created etcd serving certificates
-# without a SubjectAlternativeName entry for the system hostname. The
-# SAN list in Go 1.8 is now (correctly) authoritative and since
-# openshift-ansible configures masters to talk to etcd hostnames
-# rather than IP addresses, we must correct etcd certificates.
-#
-# This play examines the etcd serving certificate SANs on each etcd
-# host and records whether or not the system hostname is missing.
-- name: Examine etcd serving certificate SAN
-  hosts: oo_etcd_to_config
-  tasks:
-  - slurp:
-      src: /etc/etcd/server.crt
-    register: etcd_serving_cert
-  - set_fact:
-      __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"
-
-# Redeploy etcd certificates when hostnames were missing from etcd
-# serving certificate SANs.
-- import_playbook: ../../../openshift-etcd/redeploy-certificates.yml
-  when:
-  - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
-
 - name: Backup and upgrade etcd
   import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
@@ -56,7 +33,6 @@
     register: l_pb_upgrade_control_plane_pre_upgrade_storage
     when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
     failed_when:
-    - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
    - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
     - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
 
@@ -94,6 +70,12 @@
   - include_tasks: "{{ openshift_master_upgrade_hook }}"
     when: openshift_master_upgrade_hook is defined
 
+  - name: Disable master controller
+    service:
+      name: "{{ openshift_service_type }}-master-controllers"
+      enabled: false
+    when: openshift.common.rolling_restart_mode == 'system'
+
   - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
     when: openshift.common.rolling_restart_mode == 'system'
@@ -116,7 +98,6 @@
     - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - openshift_version is version_compare('3.7','<')
     failed_when:
-    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
     - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
     run_once: true
@@ -252,7 +233,6 @@
     register: l_pb_upgrade_control_plane_post_upgrade_storage
     when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     failed_when:
-    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
     - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
 
@@ -331,9 +311,13 @@
   post_tasks:
   - import_role:
       name: openshift_node
+      tasks_from: upgrade_pre.yml
+  - import_role:
+      name: openshift_node
       tasks_from: upgrade.yml
   - import_role:
       name: openshift_manage_node
       tasks_from: config.yml
     vars:
       openshift_master_host: "{{ groups.oo_first_master.0 }}"
+      openshift_manage_node_is_master: true
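Note: the new "Disable master controller" task only fires in 'system' rolling-restart mode, where restart_hosts.yml reboots each master. With the unit disabled, controllers stay down across the reboot until the v3_9 playbook (last diff below) re-enables them on every master at once, forcing a fresh leader election. The sequence, condensed into one hedged sketch; the reboot module stands in for the actual restart_hosts.yml logic, which is not part of this diff:

- name: Keep controllers down across the reboot
  service:
    name: "{{ openshift_service_type }}-master-controllers"
    enabled: false

- name: Reboot the master (stand-in for restart_hosts.yml)
  reboot:
    reboot_timeout: 600

- name: Re-enable once every master has cycled
  service:
    name: "{{ openshift_service_type }}-master-controllers"
    enabled: true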
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/roles b/playbooks/common/openshift-cluster/upgrades/v3_10/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
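Note: mode 120000 marks the new roles entry as a symlink whose one-line blob is the link target; five levels up from v3_10/ lands at the repository root, so playbooks in this directory resolve roles/ the same way the other version directories do. An equivalent provisioning task, as a sketch assumed to run from the repository root:

- file:
    src: ../../../../../roles/
    dest: playbooks/common/openshift-cluster/upgrades/v3_10/roles
    state: link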
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml
new file mode 100644
index 000000000..ec1da6d39
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml
@@ -0,0 +1,7 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: upgrade_control_plane.yml
+
+- import_playbook: upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
new file mode 100644
index 000000000..64ee03562
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
@@ -0,0 +1,58 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../init.yml
+  vars:
+    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+
+- name: Configure the upgrade target for the common upgrade tasks 3.10
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tasks:
+  - meta: clear_facts
+  - set_fact:
+      openshift_upgrade_target: '3.10'
+      openshift_upgrade_min: '3.9'
+      openshift_release: '3.10'
+
+- import_playbook: ../pre/config.yml
+  # These vars a meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
+  vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
+
+- import_playbook: ../upgrade_control_plane.yml
+  vars:
+    openshift_release: '3.10'
+
+- import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+  tasks:
+  - import_role:
+      name: openshift_web_console
+      tasks_from: remove_old_asset_config
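Note: the l_upgrade_* vars passed to ../pre/config.yml are host patterns, which is how one shared pre-upgrade playbook serves both the control-plane and node flavors: each importer decides which inventory groups every shared play targets. Sketched consumption, modeled on the "Update repos on upgrade hosts" play visible in the pre/config.yml hunk above; the openshift_repos role name is assumed from context, not shown in this diff:

- import_playbook: ../pre/config.yml
  vars:
    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"

# ...inside pre/config.yml, the var becomes a play's host pattern:
- name: Update repos on upgrade hosts
  hosts: "{{ l_upgrade_repo_hosts }}"
  roles:
  - openshift_repos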
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
new file mode 100644
index 000000000..eea1b250e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
@@ -0,0 +1,35 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../init.yml
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: oo_all_hosts
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: '3.10'
+      openshift_upgrade_min: '3.9'
+      openshift_release: '3.10'
+
+- import_playbook: ../pre/config.yml
+  vars:
+    l_upgrade_repo_hosts: "oo_nodes_to_config"
+    l_upgrade_no_proxy_hosts: "oo_all_hosts"
+    l_upgrade_health_check_hosts: "oo_nodes_to_config"
+    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+    l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+    l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml
new file mode 100644
index 000000000..d8540abfb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.8 specific upgrade checks
+  hosts: oo_first_master
+  roles:
+  - { role: lib_openshift }
+  tasks:
+  - debug: msg="noop"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index bf6e8605e..ec1da6d39 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,54 +2,6 @@
 #
 # Full Control Plane + Nodes Upgrade
 #
-- import_playbook: ../init.yml
+- import_playbook: upgrade_control_plane.yml
 
-- name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.9'
-      openshift_upgrade_min: '3.7'
-      openshift_release: '3.9'
-
-- import_playbook: ../pre/config.yml
-  vars:
-    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
-    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-    openshift_protect_installed_version: False
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
-
-# All controllers must be stopped at the same time then restarted
-- name: Cycle all controller services to force new leader election mode
-  hosts: oo_masters_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_facts
-  tasks:
-  - name: Stop {{ openshift_service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift_service_type }}-master-controllers"
-      state: stopped
-  - name: Start {{ openshift_service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift_service_type }}-master-controllers"
-      state: started
-
-- import_playbook: ../upgrade_nodes.yml
-
-- import_playbook: ../post_control_plane.yml
+- import_playbook: upgrade_nodes.yml
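Note: both version directories now share the same upgrade.yml blob (ec1da6d39): the wrapper is just two import_playbook lines, and everything stateful (upgrade targets, pre_upgrade_complete, requested versions) travels as facts cached for the rest of the run. A hypothetical guard showing how a later imported playbook could rely on a fact set by an earlier one; this assert does not exist in the commit:

- name: Verify the control-plane pre-upgrade ran first (hypothetical)
  hosts: oo_nodes_to_upgrade
  gather_facts: false
  tasks:
  - assert:
      that: pre_upgrade_complete | default(false)
      msg: run upgrade_control_plane.yml before upgrade_nodes.yml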
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index c8a42322d..9c7677f1b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -15,6 +15,7 @@
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_base_packages_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 ## Check to see if they're running 3.7 and if so upgrade them to 3.8 on control plan
 ## If they've specified pkg_version or image_tag preserve that for later use
@@ -26,6 +27,7 @@
       openshift_upgrade_min: '3.7'
       openshift_release: '3.8'
       _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+      openshift_pkg_version: ''
      _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
       l_double_upgrade_cp: True
    when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
@@ -61,10 +63,8 @@
 
 # Pre-upgrade completed
 
-- import_playbook: ../upgrade_control_plane.yml
-  vars:
-    openshift_release: '3.8'
-    openshift_pkg_version: ''
+- name: Intermediate 3.8 Upgrade
+  import_playbook: ../upgrade_control_plane.yml
   when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
 
 ## 3.8 upgrade complete we should now be able to upgrade to 3.9
@@ -77,7 +77,7 @@
       openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.8'
       openshift_release: '3.9'
-      openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
+      openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"
   # Set the user's specified image_tag for 3.9 upgrade if it was provided.
   - set_fact:
       openshift_image_tag: "{{ _requested_image_tag }}"
@@ -106,6 +106,7 @@
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
     openshift_protect_installed_version: False
+    openshift_version_reinit: True
 
 - name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_etcd_to_config
@@ -114,8 +115,6 @@
       pre_upgrade_complete: True
 
 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    openshift_release: '3.9'
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
@@ -124,14 +123,16 @@
   roles:
   - role: openshift_facts
   tasks:
-  - name: Stop {{ openshift_service_type }}-master-controllers
-    systemd:
+  - name: Restart master controllers to force new leader election mode
+    service:
       name: "{{ openshift_service_type }}-master-controllers"
-      state: stopped
-  - name: Start {{ openshift_service_type }}-master-controllers
-    systemd:
+      state: restarted
+    when: openshift.common.rolling_restart_mode == 'services'
+  - name: Re-enable master controllers to force new leader election mode
+    service:
       name: "{{ openshift_service_type }}-master-controllers"
-      state: started
+      enabled: true
+    when: openshift.common.rolling_restart_mode == 'system'
 
 - import_playbook: ../post_control_plane.yml
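Note: condensed, the double-upgrade version handling works like this: any user-requested package version is stashed, blanked for the intermediate 3.8 hop (which is why verify_cluster.yml above gained its empty-string guards), then restored, or left empty so the package default wins, for the 3.9 pass. A sketch of just those set_fact steps, lifted from the hunks above:

# Before the intermediate 3.8 hop:
- set_fact:
    _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
    openshift_pkg_version: ''    # let the 3.8 pass resolve its own version

# Before the 3.9 pass:
- set_fact:
    openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"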