Diffstat (limited to 'playbooks/common')
14 files changed, 110 insertions, 87 deletions
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 7e83b4aa6..ff5b5af67 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,5 +1,7 @@
 ---
-- include: ../openshift-cluster/evaluate_groups.yml
+- include: ../openshift-cluster/std_include.yml
+  tags:
+  - always
 
 - name: Run OpenShift health checks
   hosts: OSEv3
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index afd4f95e0..861229f21 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,5 +1,7 @@
 ---
-- include: ../openshift-cluster/evaluate_groups.yml
+- include: ../openshift-cluster/std_include.yml
+  tags:
+  - always
 
 - hosts: OSEv3
   name: run OpenShift pre-install checks
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 14d7d9822..e1df71112 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,8 +1,4 @@
 ---
-- include: std_include.yml
-  tags:
-  - always
-
 # TODO: refactor this into its own include
 # and pass a variable for ctx
 - name: Verify Requirements
@@ -22,6 +18,10 @@
         - docker_image_availability
         - docker_storage
 
+- include: initialize_firewall.yml
+  tags:
+  - always
+
 - hosts: localhost
   tasks:
   - fail:
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
new file mode 100644
index 000000000..a7114fc80
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
@@ -0,0 +1,8 @@
+---
+- name: Setup yum repositories for all hosts
+  hosts: oo_all_hosts
+  gather_facts: no
+  tasks:
+  - name: initialize openshift repos
+    include_role:
+      name: openshift_repos
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index f4e52869e..7112a6084 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,24 +1,5 @@
 ---
 # NOTE: requires openshift_facts be run
-- name: Verify compatible yum/subscription-manager combination
-  hosts: oo_all_hosts
-  gather_facts: no
-  tasks:
-  # See:
-  # https://bugzilla.redhat.com/show_bug.cgi?id=1395047
-  # https://bugzilla.redhat.com/show_bug.cgi?id=1282961
-  # https://github.com/openshift/openshift-ansible/issues/1138
-  # Consider the repoquery module for this work
-  - name: Check for bad combinations of yum and subscription-manager
-    command: >
-      {{ repoquery_cmd }} --installed --qf '%{version}' "yum"
-    register: yum_ver_test
-    changed_when: false
-    when: not openshift.common.is_atomic | bool
-  - fail:
-      msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
-    when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout
-
 - name: Determine openshift_version to configure on first master
   hosts: oo_first_master
   roles:
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index c5f0c406a..c1a5d83cd 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,6 +1,4 @@
 ---
-- include: std_include.yml
-
 - name: OpenShift Aggregated Logging
   hosts: oo_first_master
   roles:
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index 2c8ad5b75..1dc180c26 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,6 +1,4 @@
 ---
-- include: evaluate_groups.yml
-
 - name: OpenShift Metrics
   hosts: oo_first_master
   roles:
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index eab16aba0..6cc56889a 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -11,10 +11,10 @@
   tags:
   - node
 
-- include: initialize_openshift_version.yml
+- include: initialize_openshift_repos.yml
   tags:
   - always
 
-- include: initialize_firewall.yml
+- include: initialize_openshift_version.yml
   tags:
   - always
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
deleted file mode 100644
index 9f7961614..000000000
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
-# restart services. We will unconditionally restart all containerized services
-# because we have to unconditionally restart Docker:
-- set_fact:
-    skip_node_svc_handlers: True
-
-- name: Update systemd units
-  include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
-
-# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
-# play when the node has already been marked schedulable again. (this would look strange
-# in logs otherwise)
-- meta: flush_handlers
diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker
deleted file mode 120000
index 6aeca2842..000000000
--- a/playbooks/common/openshift-cluster/upgrades/master_docker
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles/openshift_master/templates/master_docker
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index 33fc5630f..be2e6a15a 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -1,17 +1,22 @@
 ---
-- name: Gather and set facts for node hosts
+- name: Validate node hostnames
   hosts: oo_nodes_to_config
-  roles:
-  - openshift_facts
   tasks:
-  - shell:
+  - name: Query DNS for IP address of {{ openshift.common.hostname }}
+    shell:
       getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
     register: lookupip
     changed_when: false
     failed_when: false
   - name: Warn user about bad openshift_hostname values
     pause:
-      prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+      prompt:
+        The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
+        doesn't resolve to an IP address owned by this host. Please set
+        openshift_hostname variable to a hostname that when resolved on the host
+        in question resolves to an IP address matching an interface on this
+        host. This host will fail liveness checks for pods utilizing hostPorts,
+        press ENTER to continue or CTRL-C to abort.
       seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
     when:
     - lookupip.stdout != '127.0.0.1'
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index 3e7a48669..311ff84b6 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -17,18 +17,14 @@
   tags:
   - always
 
+# TODO: This will be different for release-3.6 branch
 - name: Prepare masters for etcd data migration
   hosts: oo_masters_to_config
   tasks:
   - set_fact:
       master_services:
-      - "{{ openshift.common.service_type + '-master' }}"
-  - set_fact:
-      master_services:
       - "{{ openshift.common.service_type + '-master-controllers' }}"
       - "{{ openshift.common.service_type + '-master-api' }}"
-    when:
-    - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
   - debug:
       msg: "master service name: {{ master_services }}"
   - name: Stop masters
@@ -67,16 +63,59 @@
     when:
     - etcd_backup_failed | length > 0
 
-- name: Migrate etcd data from v2 to v3
+- name: Stop etcd
   hosts: oo_etcd_to_migrate
   gather_facts: no
   tags:
   - always
+  pre_tasks:
+  - set_fact:
+      l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
+  - name: Disable etcd members
+    service:
+      name: "{{ l_etcd_service }}"
+      state: stopped
+
+- name: Migrate data on first etcd
+  hosts: oo_etcd_to_migrate[0]
+  gather_facts: no
+  tags:
+  - always
   roles:
   - role: etcd_migrate
     r_etcd_migrate_action: migrate
     r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
     etcd_peer: "{{ ansible_default_ipv4.address }}"
+    etcd_url_scheme: "https"
+    etcd_peer_url_scheme: "https"
+
+- name: Clean data stores on remaining etcd hosts
+  hosts: oo_etcd_to_migrate[1:]
+  gather_facts: no
+  tags:
+  - always
+  roles:
+  - role: etcd_migrate
+    r_etcd_migrate_action: clean_data
+    r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+    etcd_peer: "{{ ansible_default_ipv4.address }}"
+    etcd_url_scheme: "https"
+    etcd_peer_url_scheme: "https"
+  post_tasks:
+  - name: Add etcd hosts
+    delegate_to: localhost
+    add_host:
+      name: "{{ item }}"
+      groups: oo_new_etcd_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_etcd_to_migrate[1:] | default([]) }}"
+    changed_when: no
+  - name: Set success
+    set_fact:
+      r_etcd_migrate_success: true
+
+- include: ./scaleup.yml
 
 - name: Gate on etcd migration
   hosts: oo_masters_to_config
@@ -89,6 +128,16 @@
   - set_fact:
       etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}"
 
+- name: Add TTLs on the first master
+  hosts: oo_first_master[0]
+  roles:
+  - role: etcd_migrate
+    r_etcd_migrate_action: add_ttls
+    etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].ansible_default_ipv4.address }}"
+    etcd_url_scheme: "https"
+    etcd_peer_url_scheme: "https"
+  when: etcd_migration_failed | length == 0
+
 - name: Configure masters if etcd data migration is succesfull
   hosts: oo_masters_to_config
   roles:
@@ -100,10 +149,6 @@
       msg: "Skipping master re-configuration since migration failed."
     when:
    - etcd_migration_failed | length > 0
-
-- name: Start masters after etcd data migration
-  hosts: oo_masters_to_config
-  tasks:
   - name: Start master services
     service:
       name: "{{ item }}"
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index 192305bc8..52b90daca 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -24,6 +24,9 @@
       member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
     delegate_to: "{{ etcd_ca_host }}"
     register: etcd_add_check
+    retries: 3
+    delay: 10
+    until: etcd_add_check.rc == 0
   roles:
   - role: openshift_etcd
     when: etcd_add_check.rc == 0
@@ -36,3 +39,13 @@
       r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
   - role: nickhammond.logrotate
     when: etcd_add_check.rc == 0
+  post_tasks:
+  - name: Verify cluster is stable
+    command: >
+      /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
+      --key-file {{ etcd_peer_key_file }}
+      --ca-file {{ etcd_peer_ca_file }}
+      -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
+      cluster-health
+    retries: 1
+    delay: 30
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index edc15a3f2..d9de578f3 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -1,40 +1,26 @@
 ---
 - name: Open firewall ports for GlusterFS nodes
   hosts: glusterfs
-  vars:
-    os_firewall_allow:
-    - service: glusterfs_sshd
-      port: "2222/tcp"
-    - service: glusterfs_daemon
-      port: "24007/tcp"
-    - service: glusterfs_management
-      port: "24008/tcp"
-    - service: glusterfs_bricks
-      port: "49152-49251/tcp"
-  roles:
-  - role: os_firewall
+  tasks:
+  - include_role:
+      name: openshift_storage_glusterfs
+      tasks_from: firewall.yml
     when:
     - openshift_storage_glusterfs_is_native | default(True) | bool
 
 - name: Open firewall ports for GlusterFS registry nodes
   hosts: glusterfs_registry
-  vars:
-    os_firewall_allow:
-    - service: glusterfs_sshd
-      port: "2222/tcp"
-    - service: glusterfs_daemon
-      port: "24007/tcp"
-    - service: glusterfs_management
-      port: "24008/tcp"
-    - service: glusterfs_bricks
-      port: "49152-49251/tcp"
-  roles:
-  - role: os_firewall
+  tasks:
+  - include_role:
+      name: openshift_storage_glusterfs
+      tasks_from: firewall.yml
     when:
     - openshift_storage_glusterfs_registry_is_native | default(True) | bool
 
 - name: Configure GlusterFS
   hosts: oo_first_master
-  roles:
-  - role: openshift_storage_glusterfs
+  tasks:
+  - name: setup glusterfs
+    include_role:
+      name: openshift_storage_glusterfs
     when: groups.oo_glusterfs_to_config | default([]) | count > 0
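
Note: the net effect of the std_include.yml hunk above is that yum repository setup now runs before OpenShift version detection, while the firewall include moves into config.yml. A rough sketch of the resulting include order follows; the first three entries precede the hunk shown in this diff and are assumed from the surrounding context, not confirmed by it:

---
# playbooks/common/openshift-cluster/std_include.yml after this change (sketch)
- include: evaluate_groups.yml               # assumed: precedes the hunk shown above
  tags:
  - always

- include: initialize_facts.yml              # assumed: precedes the hunk shown above
  tags:
  - always

- include: validate_hostnames.yml            # assumed: the "tags: - node" context above belongs here
  tags:
  - node

- include: initialize_openshift_repos.yml    # new playbook added by this commit
  tags:
  - always

- include: initialize_openshift_version.yml  # now runs after repo setup
  tags:
  - always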