Diffstat (limited to 'playbooks/common')
23 files changed, 79 insertions, 185 deletions
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index 26b31d313..825f46415 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -11,6 +11,8 @@
     - role: openshift_examples
       registry_url: "{{ openshift.master.registry_url }}"
       when: openshift.common.install_examples | bool
+    - role: openshift_hosted_templates
+      registry_url: "{{ openshift.master.registry_url }}"
     - role: openshift_manageiq
       when: openshift.common.use_manageiq | bool
     - role: cockpit
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 2ba7fded5..ccbba54b4 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -56,13 +56,13 @@
       openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"
       openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"
       openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
-      openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}"
-      openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}"
-      openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}"
+      openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+      openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+      openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
       openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
-      openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}"
-      openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}"
-      openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}"
+      openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+      openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+      openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
     - role: cockpit-ui
-      when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool )
+      when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
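The hunk above moves the logging PVC conditionals off the cached openshift.hosted.logging.storage_kind fact onto the openshift_hosted_logging_storage_kind inventory variable, and sizes a PVC for 'nfs' as well as 'dynamic' storage. A minimal sketch of how the expression behaves in isolation; the play and the variable value are illustrative, not part of the change:

- hosts: localhost
  gather_facts: no
  vars:
    openshift_hosted_logging_storage_kind: dynamic
  tasks:
  # Renders 'true' for 'dynamic', and '' for 'nfs' or an unset kind
  - debug:
      msg: "pvc_dynamic={{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"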
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 4996c56a7..5f008a045 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -224,7 +224,7 @@
 
   - name: Prepare for node evacuation
     command: >
-      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+      {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
       manage-node {{ openshift.node.nodename }} --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -232,7 +232,7 @@
 
   - name: Evacuate node
     command: >
-      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+      {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
       manage-node {{ openshift.node.nodename }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -240,7 +240,7 @@
 
   - name: Set node schedulability
     command: >
-      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+      {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
       manage-node {{ openshift.node.nodename }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
index 32a3636aa..439df5ffd 100644
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
@@ -1,5 +1,3 @@
-- include_vars: ../../../../roles/openshift_node/vars/main.yml
-
 - name: Update systemd units
   include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 78f6c46f3..23cf8cf76 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -22,11 +22,11 @@
 
   - name: Create service signer certificate
     command: >
-      {{ openshift.common.admin_binary }} ca create-signer-cert
-      --cert=service-signer.crt
-      --key=service-signer.key
-      --name=openshift-service-serving-signer
-      --serial=service-signer.serial.txt
+      {{ openshift.common.client_binary }} adm ca create-signer-cert
+      --cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt
+      --key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key
+      --name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer
+      --serial="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.serial.txt
     args:
       chdir: "{{ remote_cert_create_tmpdir.stdout }}/"
     when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
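These hunks are part of a sweep that replaces openshift.common.admin_binary with "{{ openshift.common.client_binary }} adm": the standalone admin binary is deprecated in favor of the adm subcommand of the client (oc adm). A sketch of the resulting invocation style; the kubeconfig path and node name are placeholders, not taken from the change:

- name: Mark a node unschedulable (sketch)
  command: >
    {{ openshift.common.client_binary }} adm
    --config=/etc/origin/master/admin.kubeconfig
    manage-node node1.example.com --schedulable=false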
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index fc26d029e..ee75aa853 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -22,22 +22,24 @@
   command: >
     {{ repoquery_cmd }} --qf '%{version}' "docker"
   register: avail_docker_version
+  # Don't expect docker rpm to be available on hosts that don't already have it installed:
+  when: pkg_check.rc == 0
   failed_when: false
   changed_when: false
 
 - fail:
     msg: This playbook requires access to Docker 1.10 or later
   # Disable the 1.10 requirement if the user set a specific Docker version
-  when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.10','<'))
+  when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.10','<')))
 
 # Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
 - set_fact:
     l_docker_upgrade: False
 
-# Make sure a docker_verison is set if none was requested:
+# Make sure a docker_version is set if none was requested:
 - set_fact:
     docker_version: "{{ avail_docker_version.stdout }}"
-  when: docker_version is not defined
+  when: pkg_check.rc == 0 and docker_version is not defined
 
 - name: Flag for Docker upgrade if necessary
   set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index f3b3abe0d..fbdb7900a 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -10,7 +10,7 @@
   - add_host:
       name: "{{ item }}"
       groups: l_oo_all_hosts
-    with_items: g_all_hosts | default([])
+    with_items: "{{ g_all_hosts | default([]) }}"
 
 - hosts: l_oo_all_hosts
   gather_facts: no
diff --git a/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf
new file mode 120000
index 000000000..514526fe2
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openvswitch-avoid-oom.conf
\ No newline at end of file
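The with_items quoting change in init.yml recurs throughout this commit: recent Ansible releases deprecate bare variable references as loop sources, so every with_items is wrapped in quoted Jinja. A self-contained sketch of the preferred form; the host names are illustrative:

- hosts: localhost
  gather_facts: no
  vars:
    g_all_hosts: [master1.example.com, node1.example.com]
  tasks:
  # Quoted "{{ ... }}" works on both old and new Ansible; a bare
  # "with_items: g_all_hosts | default([])" now triggers a deprecation warning.
  - add_host:
      name: "{{ item }}"
      groups: l_oo_all_hosts
    with_items: "{{ g_all_hosts | default([]) }}"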
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index e43954453..2bbcbe1f8 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -17,10 +17,14 @@
   # not already exist. We could have potentially done a replace --force to
   # create and update in one step.
   - openshift_examples
+  - openshift_hosted_templates
   # Update the existing templates
   - role: openshift_examples
     registry_url: "{{ openshift.master.registry_url }}"
     openshift_examples_import_command: replace
+  - role: openshift_hosted_templates
+    registry_url: "{{ openshift.master.registry_url }}"
+    openshift_hosted_templates_import_command: replace
   pre_tasks:
   - name: Collect all routers
     command: >
@@ -41,7 +45,7 @@
       {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}' --api-version=v1
-    with_items: haproxy_routers
+    with_items: "{{ haproxy_routers }}"
 
   - name: Check for default registry
     command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index af77f140f..cd1139b29 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -5,3 +5,7 @@
 - name: Ensure python-yaml present for config upgrade
   action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
   when: not openshift.common.is_atomic | bool
+
+- name: Restart node service
+  service: name="{{ openshift.common.service_type }}-node" state=restarted
+  when: component == "node"
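The new restart task keys off the component parameter that callers pass when including rpm_upgrade.yml, so only node upgrades restart the node service. A sketch of the two include forms; the master include appears later in this commit, while the node include is assumed to live in the node upgrade path:

# Included from the control-plane upgrade: no node restart
- include: rpm_upgrade.yml component=master
# Included from the node upgrade: restarts {{ openshift.common.service_type }}-node
- include: rpm_upgrade.yml component=node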
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 2c641e21e..764563d28 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -11,12 +11,25 @@
     add_host:
       name: "{{ item }}"
       groups: etcd_hosts_to_backup
-    with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+
+# If facts cache were for some reason deleted, this fact may not be set, and if not set
+# it will always default to true. This causes problems for the etcd data dir fact detection
+# so we must first make sure this is set correctly before attempting the backup.
+- name: Set master embedded_etcd fact
+  hosts: oo_masters_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
 
 - name: Backup etcd
   hosts: etcd_hosts_to_backup
   vars:
-    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+    embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
     timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
   roles:
   - openshift_facts
@@ -57,7 +70,7 @@
     when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
 
   - name: Install etcd (for etcdctl)
-    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+    action: "{{ ansible_pkg_mgr }} name=etcd state=installed"
     when: not openshift.common.is_atomic | bool
 
   - name: Generate etcd backup
@@ -99,6 +112,8 @@
 - include: rpm_upgrade.yml component=master
   when: not openshift.common.is_containerized | bool
 
+# Create service signer cert when missing. Service signer certificate
+# is added to master config in the master config hook for v3_3.
 - name: Determine if service signer cert must be created
   hosts: oo_first_master
   tasks:
@@ -108,8 +123,6 @@
     register: service_signer_cert_stat
     changed_when: false
 
-# Create service signer cert when missing. Service signer certificate
-# is added to master config in the master config hook for v3_3.
 - include: create_service_signer_cert.yml
 
 - name: Upgrade master config and systemd units
@@ -128,13 +141,6 @@
   - name: Update systemd units
     include: ../../../../roles/openshift_master/tasks/systemd_units.yml
 
-#  - name: Upgrade master configuration
-#    openshift_upgrade_config:
-#      from_version: '3.1'
-#      to_version: '3.2'
-#      role: master
-#      config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
-
   - name: Check for ca-bundle.crt
     stat:
       path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
@@ -184,6 +190,10 @@
       msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
     when: master_update_failed | length > 0
 
+# We are now ready to restart master services (or entire system
+# depending on openshift_rolling_restart_mode):
+- include: ../../openshift-master/restart.yml
+
 ###############################################################################
 # Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
 ###############################################################################
@@ -200,19 +210,15 @@
   # restart.
     skip_docker_role: True
   tasks:
-  - name: Verifying the correct commandline tools are available
-    shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
-    when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
-
   - name: Reconcile Cluster Roles
     command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-roles
       --additive-only=true --confirm
     run_once: true
 
   - name: Reconcile Cluster Role Bindings
     command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-role-bindings
       --exclude-groups=system:authenticated
       --exclude-groups=system:authenticated:oauth
@@ -222,9 +228,15 @@
     when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
     run_once: true
 
+  - name: Reconcile Jenkins Pipeline Role Bindings
+    command: >
+      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm
+    run_once: true
+    when: openshift.common.version_gte_3_4_or_1_4 | bool
+
   - name: Reconcile Security Context Constraints
     command: >
-      {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true
+      {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true
     run_once: true
 
   - set_fact:
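The new play re-derives embedded_etcd instead of trusting the facts cache: etcd is embedded in the master exactly when no external etcd hosts are configured, so the fact reduces to a group-length test. A sketch of the expression in isolation; etcd_hosts is a hypothetical stand-in for groups.oo_etcd_to_config:

- hosts: localhost
  gather_facts: no
  vars:
    etcd_hosts: []   # hypothetical stand-in for groups.oo_etcd_to_config
  tasks:
  # An empty list of external etcd hosts means etcd is embedded
  - debug:
      msg: "embedded_etcd={{ etcd_hosts | default([]) | length == 0 }}"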
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 9b572dcdf..1f314c854 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -29,7 +29,7 @@
 
   - name: Mark unschedulable if host is a node
     command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
   # NOTE: There is a transient "object has been modified" error here, allow a couple
@@ -41,7 +41,7 @@
 
   - name: Evacuate Node for Kubelet upgrade
     command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
   tasks:
@@ -64,7 +64,7 @@
 
   - name: Set node schedulability
     command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
     register: node_sched
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
index fd2bc24ae..f460612ba 100644
--- a/playbooks/common/openshift-etcd/service.yml
+++ b/playbooks/common/openshift-etcd/service.yml
@@ -10,7 +10,7 @@
 
   - name: Evaluate g_service_etcd
     add_host: name={{ item }} groups=g_service_etcd
-    with_items: oo_host_group_exp | default([])
+    with_items: "{{ oo_host_group_exp | default([]) }}"
 
 - name: Change etcd state on etcd instance(s)
   hosts: g_service_etcd
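The three manage-node hunks above preserve the node upgrade choreography: mark the node unschedulable, evacuate (drain) its pods, upgrade, then restore the previous schedulability, all delegated to the first master, which holds the admin kubeconfig. A condensed sketch of the sequence; the node name is a placeholder and the upgrade step is elided:

- hosts: localhost
  gather_facts: no
  tasks:
  # Cordon, drain, upgrade, then uncordon (sketch)
  - command: oc adm manage-node node1.example.com --schedulable=false
  - command: oc adm manage-node node1.example.com --evacuate --force
  # ... node packages/containers are upgraded here ...
  - command: oc adm manage-node node1.example.com --schedulable=true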
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
index e06a14c89..efc80edf9 100644
--- a/playbooks/common/openshift-loadbalancer/service.yml
+++ b/playbooks/common/openshift-loadbalancer/service.yml
@@ -10,7 +10,7 @@
 
   - name: Evaluate g_service_lb
     add_host: name={{ item }} groups=g_service_lb
-    with_items: oo_host_group_exp | default([])
+    with_items: "{{ oo_host_group_exp | default([]) }}"
 
 - name: Change state on lb instance(s)
   hosts: g_service_lb
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 57a63cfee..5769ef5cd 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -66,63 +66,8 @@
         current_host: "{{ exists.stat.exists }}"
     when: openshift.common.rolling_restart_mode == 'system'
 
-- name: Determine which masters are currently active
-  hosts: oo_masters_to_config
-  any_errors_fatal: true
-  tasks:
-  - name: Check master service status
-    command: >
-      systemctl is-active {{ openshift.common.service_type }}-master
-    register: active_check_output
-    when: openshift.master.cluster_method | default(None) == 'pacemaker'
-    failed_when: false
-    changed_when: false
-  - set_fact:
-      is_active: "{{ active_check_output.stdout == 'active' }}"
-    when: openshift.master.cluster_method | default(None) == 'pacemaker'
-
-- name: Evaluate master groups
-  hosts: localhost
-  become: no
-  tasks:
-  - fail:
-      msg: >
-        Did not receive active status from any masters. Please verify pacemaker cluster.
-    when: "{{ hostvars[groups.oo_first_master.0].openshift.master.cluster_method | default(None) == 'pacemaker' and 'True' not in (hostvars
-           | oo_select_keys(groups['oo_masters_to_config'])
-           | oo_collect('is_active')
-           | list) }}"
-  - name: Evaluate oo_active_masters
-    add_host:
-      name: "{{ item }}"
-      groups: oo_active_masters
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_become: "{{ g_sudo | default(omit) }}"
-    with_items: "{{ groups.oo_masters_to_config | default([]) }}"
-    when: (hostvars[item]['is_active'] | default(false)) | bool
-  - name: Evaluate oo_current_masters
-    add_host:
-      name: "{{ item }}"
-      groups: oo_current_masters
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_become: "{{ g_sudo | default(omit) }}"
-    with_items: "{{ groups.oo_masters_to_config | default([]) }}"
-    when: (hostvars[item]['current_host'] | default(false)) | bool
-
-- name: Validate pacemaker cluster
-  hosts: oo_active_masters
-  tasks:
-  - name: Retrieve pcs status
-    command: pcs status
-    register: pcs_status_output
-    changed_when: false
-  - fail:
-      msg: >
-        Pacemaker cluster validation failed. One or more nodes are not online.
-    when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool
-
 - name: Restart masters
-  hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters
+  hosts: oo_masters_to_config
   vars:
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
@@ -132,20 +77,3 @@
   - include: restart_services.yml
     when: openshift.common.rolling_restart_mode == 'services'
 
-- name: Restart active masters
-  hosts: oo_active_masters
-  serial: 1
-  tasks:
-  - include: restart_hosts_pacemaker.yml
-    when: openshift.common.rolling_restart_mode == 'system'
-  - include: restart_services_pacemaker.yml
-    when: openshift.common.rolling_restart_mode == 'services'
-
-- name: Restart current masters
-  hosts: oo_current_masters
-  serial: 1
-  tasks:
-  - include: restart_hosts.yml
-    when: openshift.common.rolling_restart_mode == 'system'
-  - include: restart_services.yml
-    when: openshift.common.rolling_restart_mode == 'services'
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index ff206f5a2..b1c36718c 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -5,8 +5,8 @@
   poll: 0
   ignore_errors: true
   become: yes
-# When cluster_method != pacemaker we can ensure the api_port is
-# available.
+
+# Ensure the api_port is available.
 - name: Wait for master API to come back online
   become: no
   local_action:
@@ -15,25 +15,3 @@
     state=started
     delay=10
     port="{{ openshift.master.api_port }}"
-  when: openshift.master.cluster_method != 'pacemaker'
-- name: Wait for master to start
-  become: no
-  local_action:
-    module: wait_for
-    host="{{ inventory_hostname }}"
-    state=started
-    delay=10
-    port=22
-  when: openshift.master.cluster_method == 'pacemaker'
-- name: Wait for master to become available
-  command: pcs status
-  register: pcs_status_output
-  until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool
-  retries: 15
-  delay: 2
-  changed_when: false
-  when: openshift.master.cluster_method == 'pacemaker'
-- fail:
-    msg: >
-      Pacemaker cluster validation failed {{ inventory hostname }} is not online.
-  when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool
diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
deleted file mode 100644
index c9219e8de..000000000
--- a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-- name: Fail over master resource
-  command: >
-    pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }}
-- name: Wait for master API to come back online
-  become: no
-  local_action:
-    module: wait_for
-    host="{{ openshift.master.cluster_hostname }}"
-    state=started
-    delay=10
-    port="{{ openshift.master.api_port }}"
-- name: Restart master system
-  # https://github.com/ansible/ansible/issues/10616
-  shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
-  async: 1
-  poll: 0
-  ignore_errors: true
-  become: yes
-- name: Wait for master to start
-  become: no
-  local_action:
-    module: wait_for
-    host="{{ inventory_hostname }}"
-    state=started
-    delay=10
diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml
deleted file mode 100644
index e738f3fb6..000000000
--- a/playbooks/common/openshift-master/restart_services_pacemaker.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-- name: Restart master services
-  command: pcs resource restart master
-- name: Wait for master API to come back online
-  become: no
-  local_action:
-    module: wait_for
-    host="{{ openshift.master.cluster_hostname }}"
-    state=started
-    delay=10
-    port="{{ openshift.master.api_port }}"
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 56ed09e1b..18e5c665f 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -33,7 +33,7 @@
     service: name={{ openshift.common.service_type }}-master-controllers state=restarted
   - name: verify api server
     command: >
-      curl --silent
+      curl --silent --tlsv1.2
       {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
       --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
       {% else %}
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
index f60c5a2b5..5e5198335 100644
--- a/playbooks/common/openshift-master/service.yml
+++ b/playbooks/common/openshift-master/service.yml
@@ -10,7 +10,7 @@
 
   - name: Evaluate g_service_masters
     add_host: name={{ item }} groups=g_service_masters
-    with_items: oo_host_group_exp | default([])
+    with_items: "{{ oo_host_group_exp | default([]) }}"
 
 - name: Change state on master instance(s)
   hosts: g_service_masters
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
index 20c8ca248..8468014da 100644
--- a/playbooks/common/openshift-nfs/service.yml
+++ b/playbooks/common/openshift-nfs/service.yml
@@ -8,7 +8,7 @@
 
   - name: Evaluate g_service_nfs
     add_host: name={{ item }} groups=g_service_nfs
-    with_items: oo_host_group_exp | default([])
+    with_items: "{{ oo_host_group_exp | default([]) }}"
 
 - name: Change state on nfs instance(s)
   hosts: g_service_nfs
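Forcing --tlsv1.2 in the scaleup health check (and in the node config check below) keeps curl from negotiating an older TLS version than the API server accepts. A sketch of how the full verification task might look after the change; the URL, retry counts, and register name are assumptions drawn from the surrounding pattern, not from this hunk:

- name: verify api server (sketch)
  command: >
    curl --silent --tlsv1.2
    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
    {{ openshift.master.api_url }}/healthz/ready
  register: api_available_output
  until: api_available_output.stdout == 'ok'
  retries: 120
  delay: 1
  changed_when: false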
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 364a62dd0..4824eeef3 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -67,7 +67,7 @@
       openshift_ca_host: "{{ groups.oo_first_master.0 }}"
   - role: openshift_cloud_provider
   - role: openshift_node_dnsmasq
-    when: openshift.common.use_dnsmasq
+    when: openshift.common.use_dnsmasq | bool
   - role: os_firewall
     os_firewall_allow:
     - service: Kubernetes kubelet
@@ -106,7 +106,7 @@
       openshift_ca_host: "{{ groups.oo_first_master.0 }}"
   - role: openshift_cloud_provider
   - role: openshift_node_dnsmasq
-    when: openshift.common.use_dnsmasq
+    when: openshift.common.use_dnsmasq | bool
   - role: os_firewall
     os_firewall_allow:
     - service: Kubernetes kubelet
@@ -165,7 +165,7 @@
   # Using curl here since the uri module requires python-httplib2 and
   # wait_for port doesn't provide health information.
   command: >
-    curl --silent
+    curl --silent --tlsv1.2
     {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
     {% else %}
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
index 0f07add2a..33095c9fb 100644
--- a/playbooks/common/openshift-node/service.yml
+++ b/playbooks/common/openshift-node/service.yml
@@ -10,7 +10,7 @@
 
   - name: Evaluate g_service_nodes
     add_host: name={{ item }} groups=g_service_nodes
-    with_items: oo_host_group_exp | default([])
+    with_items: "{{ oo_host_group_exp | default([]) }}"
 
 - name: Change state on node instance(s)
   hosts: g_service_nodes
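The | bool casts on use_dnsmasq matter because facts can arrive as strings: in a bare when:, the non-empty string "false" is truthy, while | bool coerces it to a real boolean. A minimal self-contained sketch of the difference; the variable name and value are illustrative:

- hosts: localhost
  gather_facts: no
  vars:
    use_dnsmasq: "false"   # string-typed, as cached facts often are
  tasks:
  - debug:
      msg: "runs anyway: a bare non-empty string is truthy"
    when: use_dnsmasq
  - debug:
      msg: "skipped: '| bool' coerces the string 'false' to False"
    when: use_dnsmasq | bool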