Diffstat (limited to 'playbooks')
33 files changed, 139 insertions, 61 deletions
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index 2eb7d23d4..f6b1a6b5d 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -93,6 +93,11 @@ openshift_aws_ssh_key_name: # myuser_key
 # ---------
 #
 # Variables in this section apply to building a node AMI for use in your
 # openshift cluster.
+# openshift-ansible will perform the container runtime storage setup when specified.
+# The current storage setup will require a dedicated drive if using a separate
+# storage device for the container runtime.
+container_runtime_docker_storage_type: overlay2
+container_runtime_docker_storage_setup_device: /dev/xvdb
 # must specify a base_ami when building an AMI
 openshift_aws_base_ami: # ami-12345678
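Note: the two new variables tell the container_runtime role to prepare a dedicated device for overlay2. A hand-rolled sketch of what that setup amounts to on a node (illustrative only; it assumes container-storage-setup's STORAGE_DRIVER and DEVS conventions rather than the role's actual tasks):

    - name: Configure container-storage-setup for a dedicated overlay2 device
      copy:
        dest: /etc/sysconfig/docker-storage-setup
        content: |
          STORAGE_DRIVER={{ container_runtime_docker_storage_type }}
          DEVS={{ container_runtime_docker_storage_setup_device }}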
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 372a39e74..6d82fa928 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -2,7 +2,6 @@
 - name: Create local temp directory for syncing certs
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Create local temp directory for syncing certs
@@ -11,8 +10,15 @@
     changed_when: false
     when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
 
+  - name: Chmod local temp directory
+    local_action: command chmod 777 "{{ local_cert_sync_tmpdir.stdout }}"
+    changed_when: false
+    when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
 - name: Create service signer certificate
   hosts: oo_first_master
+  roles:
+  - openshift_facts
   tasks:
   - name: Create remote temp directory for creating certs
     command: mktemp -d /tmp/openshift-ansible-XXXXXXX
@@ -65,7 +71,6 @@
 - name: Delete local temp directory
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Delete local temp directory
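Note: with become: no removed, these localhost plays now follow the run's global privilege escalation, so mktemp may create the directory as root; the chmod 777 keeps it usable by the unprivileged user that later syncs certs into it. The same step could be written with the file module instead of a raw chmod (equivalent sketch, same conditions as the task above):

      - name: Chmod local temp directory
        file:
          path: "{{ local_cert_sync_tmpdir.stdout }}"
          mode: "0777"
        changed_when: false
        when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)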
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index ffb11670d..8392e21ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -51,13 +51,19 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
     register: l_docker_upgrade_drain_result
     until: not (l_docker_upgrade_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_docker_upgrade_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
 
   - include_tasks: tasks/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool
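Note: the drain now honors openshift_upgrade_nodes_drain_timeout. With the default of 0 the drain waits indefinitely and any failure aborts the upgrade; with a non-zero timeout a failed drain is tolerated and the upgrade proceeds. A minimal inventory sketch (hypothetical value):

    # Allow ten minutes per node to drain, then continue even if pods remain.
    openshift_upgrade_nodes_drain_timeout: 600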
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index cfc0c8745..da63450b8 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -1,4 +1,6 @@
 ---
+# For control-plane upgrades, several variables may be passed in to this play,
+# which may affect the tasks here and in imported playbooks.
 # Pre-upgrade
 - import_playbook: ../initialize_nodes_to_upgrade.yml
@@ -48,6 +50,8 @@
     # defined, and overriding the normal behavior of protecting the installed version
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
+    # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
+    # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
 
 # If we're only upgrading nodes, we need to ensure masters are already upgraded
 - name: Verify masters are already upgraded
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 50be0dee0..e89f06f17 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -22,6 +22,8 @@
 # See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
 - name: Pre master upgrade - Upgrade all storage
   hosts: oo_first_master
+  roles:
+  - openshift_facts
   tasks:
   - name: Upgrade all storage
     command: >
@@ -49,10 +51,9 @@
   vars:
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
+  roles:
+  - openshift_facts
   tasks:
-  - import_role:
-      name: openshift_facts
-
   # Run the pre-upgrade hook if defined:
   - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
     when: openshift_master_upgrade_pre_hook is defined
@@ -108,7 +109,6 @@
 - name: Gate on master update
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       master_update_completed: "{{ hostvars
@@ -128,6 +128,7 @@
   hosts: oo_masters_to_config
   roles:
   - { role: openshift_cli }
+  - { role: openshift_facts }
   vars:
     __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
   tasks:
@@ -242,7 +243,6 @@
 - name: Gate on reconcile
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       reconcile_completed: "{{ hostvars
@@ -291,12 +291,18 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_control_plane_drain_result
     until: not (l_upgrade_control_plane_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_upgrade_control_plane_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
 
   roles:
   - openshift_facts
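Note: several plays above swap an import_role task for a roles: entry. Listing openshift_facts on the play makes the role's default variables visible to every task from the start, instead of only after the import task has run. Sketch of the pattern (the debug task is illustrative, and openshift_client_binary is assumed to come from the role's defaults):

    - hosts: oo_first_master
      roles:
      - openshift_facts                        # defaults enter play scope here
      tasks:
      - debug: var=openshift_client_binary     # usable by the first task already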
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 464af3ae6..850442b3b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -33,12 +33,18 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not (l_upgrade_nodes_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_upgrade_nodes_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
 
   post_tasks:
   - import_role:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 6d59bfd0b..e259b5d09 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -50,11 +50,11 @@
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not (l_upgrade_nodes_drain_result is failed)
-    retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
     delay: 5
     failed_when:
     - l_upgrade_nodes_drain_result is failed
-    - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
 
 # Alright, let's clean up!
 - name: clean up the old scale group
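Note: the upgrade_scale_group.yml hunk fixes an operator-precedence bug. In the old expression, the un-cast default was compared to the string '0' (never equal when the variable is unset), and "| int" bound only to the else-branch literal. A quick way to see the difference (standalone sketch; run with the variable left unset):

    - hosts: localhost
      gather_facts: no
      tasks:
      - debug:
          msg: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"    # old form: 0
      - debug:
          msg: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"  # fixed form: 1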
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index a956fdde5..eb5f07ae0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
     openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
 
 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 1750148d4..8d42e4c91 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
     openshift_upgrade_min: '3.6'
 
 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 49e691352..9c7688981 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -7,6 +7,7 @@
   hosts: oo_first_master
   roles:
   - { role: lib_openshift }
+  - { role: openshift_facts }
 
   tasks:
   - name: Check for invalid namespaces and SDN errors
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 08bfd239f..a2f316c25 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
     openshift_upgrade_min: '3.7'
 
 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 0aea5069d..552bea5e7 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -41,13 +41,13 @@
   roles:
   - role: openshift_facts
   tasks:
-  - name: Stop {{ openshift.common.service_type }}-master-controllers
+  - name: Stop {{ openshift_service_type }}-master-controllers
     systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
+      name: "{{ openshift_service_type }}-master-controllers"
       state: stopped
-  - name: Start {{ openshift.common.service_type }}-master-controllers
+  - name: Start {{ openshift_service_type }}-master-controllers
     systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
+      name: "{{ openshift_service_type }}-master-controllers"
       state: started
 
 - import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 05aa737c6..ef9871008 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
     openshift_upgrade_min: '3.7'
 
 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
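Note: all four control-plane entry points now pass the same pair of host-limiting expressions into pre/config.yml. The Ansible host pattern they use composes group unions with exclusions; a sketch of the pattern only:

    # union of the etcd and master groups, minus the first master, which already
    # determined openshift_version in its own play:
    - hosts: oo_etcd_to_config:oo_masters_to_config:!oo_first_master
      gather_facts: no
      tasks:
      - debug:
          msg: "{{ inventory_hostname }} inherits version facts from the first master"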
diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml
new file mode 100644
index 000000000..7fd60743c
--- /dev/null
+++ b/playbooks/container-runtime/private/build_container_groups.yml
@@ -0,0 +1,6 @@
+---
+- name: create oo_hosts_containerized_managed_true host group
+  hosts: oo_all_hosts:!oo_nodes_to_config
+  tasks:
+  - group_by:
+      key: oo_hosts_containerized_managed_{{ (containerized | default(False)) | ternary('true','false') }}
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index dd13fa4a2..7a49adcf0 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -1,10 +1,7 @@
 ---
-- hosts: "{{ l_containerized_host_groups }}"
-  vars:
-    l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
-    l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
-  # role: container_runtime is necessary here to bring role default variables
-  # into the play scope.
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
   roles:
   - role: container_runtime
   tasks:
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
index 357f67f0c..a6d396270 100644
--- a/playbooks/container-runtime/private/setup_storage.yml
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -1,5 +1,7 @@
 ---
-- hosts: "{{ l_containerized_host_groups }}"
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
   vars:
     l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
     l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
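Note: build_container_groups.yml replaces the Jinja-assembled group list with a runtime group_by: every non-node host lands in oo_hosts_containerized_managed_true or oo_hosts_containerized_managed_false according to its containerized variable. A quick inspection play (illustrative, not part of the commit):

    - hosts: localhost
      gather_facts: no
      tasks:
      - debug:
          msg: "containerized-managed: {{ groups['oo_hosts_containerized_managed_true'] | default([]) }}"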
diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml
index 8087f6ffc..c4cd226c9 100644
--- a/playbooks/init/evaluate_groups.yml
+++ b/playbooks/init/evaluate_groups.yml
@@ -2,7 +2,6 @@
 - name: Populate config host groups
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Load group name mapping variables
diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml
index 6759240c9..8e4206948 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/facts.yml
@@ -5,7 +5,9 @@
   tasks:
 
 - name: Initialize host facts
-  hosts: oo_all_hosts
+  # l_upgrade_non_node_hosts is passed in via play during control-plane-only
+  # upgrades; otherwise oo_all_hosts is used.
+  hosts: "{{ l_upgrade_non_node_hosts | default('oo_all_hosts') }}"
   tasks:
   - name: load openshift_facts module
     import_role:
@@ -100,3 +102,5 @@
     # We need to set up openshift_client_binary here for special uses of delegate_to in
     # later roles and plays.
     first_master_client_binary: "{{ openshift_client_binary }}"
+    # Some roles may require this to be set for first master
+    openshift_client_binary: "{{ openshift_client_binary }}"
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 20457e508..8a3f4682d 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -17,12 +17,12 @@
 
 - import_playbook: facts.yml
 
-- import_playbook: sanity_checks.yml
-  when: not (skip_sanity_checks | default(False))
-
 - import_playbook: version.yml
   when: not (skip_verison | default(False))
 
+- import_playbook: sanity_checks.yml
+  when: not (skip_sanity_checks | default(False))
+
 - name: Initialization Checkpoint End
   hosts: all
   gather_facts: false
diff --git a/playbooks/init/version.yml b/playbooks/init/version.yml
index 37a5284d5..962ee7220 100644
--- a/playbooks/init/version.yml
+++ b/playbooks/init/version.yml
@@ -2,20 +2,32 @@
 # NOTE: requires openshift_facts be run
 - name: Determine openshift_version to configure on first master
   hosts: oo_first_master
-  roles:
-  - openshift_version
+  tasks:
+  - include_role:
+      name: openshift_version
+      tasks_from: first_master.yml
+  - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version | default('') }}"
 
 # NOTE: We set this even on etcd hosts as they may also later run as masters,
 # and we don't want to install wrong version of docker and have to downgrade
 # later.
 - name: Set openshift_version for etcd, node, and master hosts
-  hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
+  hosts: "{{ l_openshift_version_set_hosts | default(l_default_version_set_hosts) }}"
   vars:
-    openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
-  pre_tasks:
+    l_default_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master"
+    l_first_master_openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+    l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version | default('') }}"
+    l_first_master_openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
+  tasks:
   - set_fact:
-      openshift_pkg_version: -{{ openshift_version }}
-    when: openshift_pkg_version is not defined
-  - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
-  roles:
-  - openshift_version
+      openshift_version: "{{ l_first_master_openshift_version }}"
+      openshift_pkg_version: "{{ l_first_master_openshift_pkg_version }}"
+      openshift_image_tag: "{{ l_first_master_openshift_image_tag }}"
+
+# NOTE: These steps should only be run against masters and nodes.
+- name: Ensure the requested version packages are available.
+  hosts: "{{ l_openshift_version_check_hosts | default('oo_nodes_to_config:oo_masters_to_config:!oo_first_master') }}"
+  tasks:
+  - include_role:
+      name: openshift_version
+      tasks_from: masters_and_nodes.yml
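Note: facts.yml pins first_master_client_binary (and now openshift_client_binary) as facts on the first master so that tasks delegated there from any host resolve the correct client path, as the drain tasks above do. The general pattern (the command shown is illustrative):

    - command: >
        {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} get nodes
      delegate_to: "{{ groups.oo_first_master.0 }}"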
diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml
index b71eaacd0..674bd5088 100644
--- a/playbooks/openshift-etcd/private/embedded2external.yml
+++ b/playbooks/openshift-etcd/private/embedded2external.yml
@@ -89,7 +89,10 @@
     local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
     register: g_etcd_client_mktemp
     changed_when: False
-    become: no
+
+  - name: Chmod local temp directory for syncing etcd backup
+    local_action: command chmod 777 "{{ g_etcd_client_mktemp.stdout }}"
+    changed_when: False
 
   - import_role:
       name: etcd
@@ -116,7 +119,6 @@
   - name: Delete temporary directory
     local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
     changed_when: False
-    become: no
 
 # 7. force new cluster from the backup
 - name: Force new etcd cluster
diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml
index 0a2ac7f1a..3f8b44032 100644
--- a/playbooks/openshift-etcd/private/migrate.yml
+++ b/playbooks/openshift-etcd/private/migrate.yml
@@ -2,7 +2,6 @@
 - name: Check if the master has embedded etcd
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tags:
   - always
@@ -53,7 +52,6 @@
 - name: Gate on etcd backup
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       etcd_backup_completed: "{{ hostvars
diff --git a/playbooks/openshift-etcd/private/redeploy-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml
index 7b0d99255..a3acf6945 100644
--- a/playbooks/openshift-etcd/private/redeploy-ca.yml
+++ b/playbooks/openshift-etcd/private/redeploy-ca.yml
@@ -26,7 +26,6 @@
 - name: Create temp directory for syncing certs
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Create local temp directory for syncing certs
@@ -34,6 +33,10 @@
     register: g_etcd_mktemp
     changed_when: false
 
+  - name: Chmod local temp directory for syncing certs
+    local_action: command chmod 777 "{{ g_etcd_mktemp.stdout }}"
+    changed_when: false
+
 - name: Distribute etcd CA to etcd hosts
   hosts: oo_etcd_to_config
   tasks:
@@ -74,7 +77,6 @@
 - name: Delete temporary directory on localhost
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - file:
diff --git a/playbooks/openshift-etcd/private/upgrade_backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml
index 97b6edba5..081c024fc 100644
--- a/playbooks/openshift-etcd/private/upgrade_backup.yml
+++ b/playbooks/openshift-etcd/private/upgrade_backup.yml
@@ -14,7 +14,6 @@
 - name: Gate on etcd backup
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       etcd_backup_completed: "{{ hostvars
diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
index 9d3c12ba1..663c39868 100644
--- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml
+++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml
@@ -125,7 +125,6 @@
 - name: Create temp directory for syncing certs
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Create local temp directory for syncing certs
@@ -133,6 +132,10 @@
     register: g_master_mktemp
     changed_when: false
 
+  - name: Chmod local temp directory for syncing certs
+    local_action: command chmod 777 "{{ g_master_mktemp.stdout }}"
+    changed_when: false
+
 - name: Retrieve OpenShift CA
   hosts: oo_first_master
   vars:
@@ -264,7 +267,6 @@
 - name: Delete temporary directory on localhost
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - file:
diff --git a/playbooks/openshift-master/private/tasks/restart_hosts.yml b/playbooks/openshift-master/private/tasks/restart_hosts.yml
index a5dbe0590..76e1ea5f3 100644
--- a/playbooks/openshift-master/private/tasks/restart_hosts.yml
+++ b/playbooks/openshift-master/private/tasks/restart_hosts.yml
@@ -27,7 +27,6 @@
     delay=10
     timeout=600
     port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}"
-  become: no
 
 # Now that ssh is back up we can wait for API on the remote system,
 # avoiding some potential connection issues from local system:
diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml
index 1077d0b9c..60b0e5bb6 100644
--- a/playbooks/openshift-master/private/validate_restart.yml
+++ b/playbooks/openshift-master/private/validate_restart.yml
@@ -21,7 +21,6 @@
 - name: Create temp file on localhost
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - local_action: command mktemp
@@ -38,7 +37,6 @@
 - name: Cleanup temp file on localhost
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
index f717cd0e9..7d31340a2 100644
--- a/playbooks/openshift-master/scaleup.yml
+++ b/playbooks/openshift-master/scaleup.yml
@@ -4,7 +4,6 @@
 - name: Ensure there are new_masters or new_nodes
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - fail:
diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml
index 802dce37e..41c323f2b 100644
--- a/playbooks/openshift-node/private/setup.yml
+++ b/playbooks/openshift-node/private/setup.yml
@@ -8,7 +8,6 @@
 
 - name: Evaluate node groups
   hosts: localhost
-  become: no
   connection: local
   tasks:
   - name: Evaluate oo_containerized_master_nodes
diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml
index bdfd3d3e6..cf13692ae 100644
--- a/playbooks/openshift-node/scaleup.yml
+++ b/playbooks/openshift-node/scaleup.yml
@@ -4,7 +4,6 @@
 - name: Ensure there are new_nodes
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - fail:
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
index 3211f619a..2ab7d14a0 100644
--- a/playbooks/openstack/openshift-cluster/install.yml
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -9,4 +9,7 @@
 # some logic here?
 
 - name: run the cluster deploy
+  import_playbook: ../../prerequisites.yml
+
+- name: run the cluster deploy
   import_playbook: ../../deploy_cluster.yml
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index 933117127..481807dc9 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -14,12 +14,12 @@ openshift_hosted_router_wait: True
 openshift_hosted_registry_wait: True
 
 ## Openstack credentials
-#openshift_cloudprovider_kind=openstack
+#openshift_cloudprovider_kind: openstack
 #openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
 #openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
 #openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
 #openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
-#openshift_cloudprovider_openstack_region="{{ lookup('env', 'OS_REGION_NAME') }}"
+#openshift_cloudprovider_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}"
 
 ## Use Cinder volume for Openshift registry:
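Note: the OSEv3.yml hunk corrects two commented-out samples that used INI-inventory key=value syntax inside a YAML group_vars file; uncommented, they would not have parsed as a mapping. Group vars must be YAML:

    openshift_cloudprovider_kind: openstack     # YAML mapping, correct here
    # openshift_cloudprovider_kind=openstack    # INI inventory syntax, not valid YAML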
diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py
index 084b5c0a0..45cc4e15a 100755
--- a/playbooks/openstack/sample-inventory/inventory.py
+++ b/playbooks/openstack/sample-inventory/inventory.py
@@ -9,6 +9,7 @@ environment.
 
 from __future__ import print_function
 
+from collections import Mapping
 import json
 
 import shade
@@ -101,6 +102,10 @@ def build_inventory():
             hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
 
         node_labels = server.metadata.get('node_labels')
+        # NOTE(shadower): the node_labels value must be a dict not string
+        if not isinstance(node_labels, Mapping):
+            node_labels = json.loads(node_labels)
+
         if node_labels:
             hostvars['openshift_node_labels'] = node_labels
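Note: the inventory.py change accepts node_labels either as a dict or as a JSON string stored in the server metadata, and always emits a dict. For instance, metadata of node_labels='{"region": "infra"}' (hypothetical value) would now yield the hostvar:

    openshift_node_labels:
      region: infra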