Diffstat (limited to 'playbooks')
23 files changed, 192 insertions, 73 deletions
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index d203b9cda..bdc98d1e0 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -198,3 +198,17 @@ At this point your cluster should be ready for workloads. Proceed to deploy app
 
 ### Still to come
 There are more enhancements that are arriving for provisioning. These will include more playbooks that enhance the provisioning capabilities.
+
+## Uninstall / Deprovisioning
+
+At this time, only deprovisioning of the output of the prerequisites step is provided. You must manually remove things like ELBs and scale groups before attempting to undo the work done by the prerequisites step.
+
+To undo the work done by the prerequisites playbook, call the uninstall_prerequisites.yml playbook. Use the same inventory file and provisioning_vars.yml file that were used during provisioning.
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml
+```
+
+This should result in removal of the security groups and VPC that were created.
+
+NOTE: If you also want to remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account**, so they are not removed by default), add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file.
diff --git a/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml
new file mode 100644
index 000000000..180c2281a
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: uninstall_sec_group.yml
+
+- import_playbook: uninstall_vpc.yml
+
+- import_playbook: uninstall_ssh_keypair.yml
diff --git a/playbooks/aws/openshift-cluster/uninstall_sec_group.yml b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml
new file mode 100644
index 000000000..642e5b169
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: delete security groups
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_security_group.yml
+    when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml
new file mode 100644
index 000000000..ec9caa51b
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: remove ssh keypair(s)
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_ssh_keys.yml
+    when: openshift_aws_users | default([]) | length > 0
diff --git a/playbooks/aws/openshift-cluster/uninstall_vpc.yml b/playbooks/aws/openshift-cluster/uninstall_vpc.yml
new file mode 100644
index 000000000..4c988bcc5
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_vpc.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: delete vpc
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_vpc.yml
+    when: openshift_aws_create_vpc | default(True) | bool
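The README NOTE above comes down to a single opt-in variable. A minimal provisioning_vars.yml fragment (the variable name is taken from the diff; the comment text is a paraphrase of the README):

```
# provisioning_vars.yml (fragment)
# Opt in to deleting shared objects (the uploaded ssh keys) during
# uninstall_prerequisites.yml. The keys are shared by every cluster in
# the same AWS account, so the default (False) leaves them in place.
openshift_aws_enable_uninstall_shared_objects: True
```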
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 8ee83819e..ba783638d 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -5,7 +5,8 @@
     g_new_master_hosts: []
     g_new_node_hosts: []
 
-- import_playbook: ../../../init/facts.yml
+- import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/cluster_facts.yml
 
 - name: Ensure firewall is not switched during upgrade
   hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index eb5f07ae0..d88880140 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 8d42e4c91..ce069e2d0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 51da45311..3f26a6297 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
   when: not skip_version_info | default(false)
 
 - name: Configure the upgrade target for the common upgrade tasks
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 384eeed4c..0f48725f6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 ## Check to see if they're running 3.7 and if so upgrade them to 3.8 on control plane
 ## If they've specified pkg_version or image_tag preserve that for later use
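All four hunks above are the same mechanical rename: `l_upgrade_non_node_hosts` becomes `l_init_fact_hosts`, with the host pattern unchanged. The upgrade entry points themselves are untouched, so a control-plane upgrade is still invoked as before (a sketch; the byo wrapper path is assumed from the repository layout of this era):

```
ansible-playbook -i <inventory> \
    playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
```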
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index 7a49adcf0..817a8bf30 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -1,7 +1,11 @@
 ---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
 - import_playbook: build_container_groups.yml
 
-- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+- hosts: "{{ l_scale_up_hosts | default(l_default_container_runtime_hosts) }}"
+  vars:
+    l_default_container_runtime_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
   roles:
   - role: container_runtime
   tasks:
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
index a6d396270..65630be62 100644
--- a/playbooks/container-runtime/private/setup_storage.yml
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -1,8 +1,11 @@
 ---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
 - import_playbook: build_container_groups.yml
 
-- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+- hosts: "{{ l_scale_up_hosts | default(l_default_container_storage_hosts) }}"
   vars:
+    l_default_container_storage_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
     l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
     l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
   # role: container_runtime is necessary here to bring role default variables
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index 15b3dd492..e1052fb6c 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -1,6 +1,8 @@
 ---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
 - name: Install packages necessary for installer
-  hosts: oo_all_hosts
+  hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
   any_errors_fatal: true
   tasks:
   - when:
diff --git a/playbooks/init/facts.yml b/playbooks/init/basic_facts.yml
index 8e4206948..06a4e7291 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -4,15 +4,13 @@
   any_errors_fatal: true
   tasks:
 
-- name: Initialize host facts
-  # l_upgrade_non_node_hosts is passed in via play during control-plane-only
-  # upgrades; otherwise oo_all_hosts is used.
-  hosts: "{{ l_upgrade_non_node_hosts | default('oo_all_hosts') }}"
+- name: Initialize basic host facts
+  # l_init_fact_hosts is passed in via play during control-plane-only
+  # upgrades and scale-up plays; otherwise oo_all_hosts is used.
+  hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+  roles:
+  - role: openshift_facts
   tasks:
-  - name: load openshift_facts module
-    import_role:
-      name: openshift_facts
-
   # TODO: Should this role be refactored into health_checks??
   - name: Run openshift_sanitize_inventory to set variables
     import_role:
@@ -58,41 +56,6 @@
     - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
       msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
 
-  - name: Gather Cluster facts
-    openshift_facts:
-      role: common
-      local_facts:
-        deployment_type: "{{ openshift_deployment_type }}"
-        deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
-        hostname: "{{ openshift_hostname | default(None) }}"
-        ip: "{{ openshift_ip | default(None) }}"
-        public_hostname: "{{ openshift_public_hostname | default(None) }}"
-        public_ip: "{{ openshift_public_ip | default(None) }}"
-        portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
-        http_proxy: "{{ openshift_http_proxy | default(None) }}"
-        https_proxy: "{{ openshift_https_proxy | default(None) }}"
-        no_proxy: "{{ openshift_no_proxy | default(None) }}"
-        generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
-
-  - name: Set fact of no_proxy_internal_hostnames
-    openshift_facts:
-      role: common
-      local_facts:
-        no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
-                                             | union(groups['oo_masters_to_config'])
-                                             | union(groups['oo_etcd_to_config'] | default([])))
-                                         | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                         }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-  - name: Initialize openshift.node.sdn_mtu
-    openshift_facts:
-      role: node
-      local_facts:
-        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
 
 - name: Initialize special first-master variables
   hosts: oo_first_master
   roles:
diff --git a/playbooks/init/cluster_facts.yml b/playbooks/init/cluster_facts.yml
new file mode 100644
index 000000000..636679e32
--- /dev/null
+++ b/playbooks/init/cluster_facts.yml
@@ -0,0 +1,42 @@
+---
+- name: Initialize cluster facts
+  # l_init_fact_hosts is passed in via play during control-plane-only
+  # upgrades and scale-up plays; otherwise oo_all_hosts is used.
+  hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+  roles:
+  - role: openshift_facts
+  tasks:
+  - name: Gather Cluster facts
+    openshift_facts:
+      role: common
+      local_facts:
+        deployment_type: "{{ openshift_deployment_type }}"
+        deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+        hostname: "{{ openshift_hostname | default(None) }}"
+        ip: "{{ openshift_ip | default(None) }}"
+        public_hostname: "{{ openshift_public_hostname | default(None) }}"
+        public_ip: "{{ openshift_public_ip | default(None) }}"
+        portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+        http_proxy: "{{ openshift_http_proxy | default(None) }}"
+        https_proxy: "{{ openshift_https_proxy | default(None) }}"
+        no_proxy: "{{ openshift_no_proxy | default(None) }}"
+        generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+
+  - name: Set fact of no_proxy_internal_hostnames
+    openshift_facts:
+      role: common
+      local_facts:
+        no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
+                                             | union(groups['oo_masters_to_config'])
+                                             | union(groups['oo_etcd_to_config'] | default([])))
+                                         | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                         }}"
+    when:
+    - openshift_http_proxy is defined or openshift_https_proxy is defined
+    - openshift_generate_no_proxy_hosts | default(True) | bool
+
+  - name: Initialize openshift.node.sdn_mtu
+    openshift_facts:
+      role: node
+      local_facts:
+        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 8a3f4682d..9886691e0 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -1,4 +1,7 @@
 ---
+# skip_verison and l_install_base_packages are passed in via prerequisites.yml.
+# skip_sanity_checks is passed in via openshift-node/private/image_prep.yml
+
 - name: Initialization Checkpoint Start
   hosts: all
   gather_facts: false
@@ -15,7 +18,13 @@
 
 - import_playbook: evaluate_groups.yml
 
-- import_playbook: facts.yml
+- import_playbook: basic_facts.yml
+
+# base_packages needs to be setup for openshift_facts.py to run correctly.
+- import_playbook: base_packages.yml
+  when: l_install_base_packages | default(False) | bool
+
+- import_playbook: cluster_facts.yml
 
 - import_playbook: version.yml
   when: not (skip_verison | default(False))
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
index 667f38ddd..655a7e83a 100644
--- a/playbooks/init/repos.yml
+++ b/playbooks/init/repos.yml
@@ -1,6 +1,8 @@
 ---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
 - name: Setup yum repositories for all hosts
-  hosts: oo_all_hosts
+  hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
   gather_facts: no
   tasks:
   - name: subscribe instances to Red Hat Subscription Manager
diff --git a/playbooks/init/sanity_checks.yml b/playbooks/init/sanity_checks.yml
index 52bcf42c0..fbbb3f8fb 100644
--- a/playbooks/init/sanity_checks.yml
+++ b/playbooks/init/sanity_checks.yml
@@ -1,4 +1,5 @@
 ---
+# l_sanity_check_hosts may be passed in during scale-up plays
 - name: Verify Requirements
   hosts: oo_first_master
   roles:
@@ -11,5 +12,5 @@
   # Thus, sanity_checks cannot gather new information about any hosts.
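The net effect of the init/main.yml change: base packages now install between the basic and cluster fact phases, and only when the caller opts in. A minimal caller sketch (this mirrors the prerequisites.yml hunk near the end of this diff; note that `skip_verison` is the variable's actual spelling in this codebase):

```
- import_playbook: init/main.yml
  vars:
    skip_verison: True             # skip version detection (existing flag)
    l_install_base_packages: True  # run init/base_packages.yml during init
```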
   - name: Run variable sanity checks
     sanity_checks:
-      check_hosts: "{{ groups['oo_all_hosts'] }}"
+      check_hosts: "{{ l_sanity_check_hosts | default(groups['oo_all_hosts']) }}"
     run_once: True
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
index 71606e7e4..b1ce6b220 100644
--- a/playbooks/openshift-etcd/upgrade.yml
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -2,6 +2,6 @@
 - import_playbook: ../init/main.yml
   vars:
     skip_verison: True
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - import_playbook: private/upgrade_main.yml
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
index 7d31340a2..09e205afc 100644
--- a/playbooks/openshift-master/scaleup.yml
+++ b/playbooks/openshift-master/scaleup.yml
@@ -1,22 +1,43 @@
 ---
 - import_playbook: ../init/evaluate_groups.yml
 
-- name: Ensure there are new_masters or new_nodes
+- name: Ensure there are new_masters and new_nodes
   hosts: localhost
   connection: local
   gather_facts: no
   tasks:
   - fail:
+    # new_masters must be part of new_nodes as well; otherwise if new_nodes
+    # is not present, oo_nodes_to_config will contain all existing nodes.
       msg: >
-        Detected no new_masters or no new_nodes in inventory. Please
-        add hosts to the new_masters and new_nodes host groups to add
-        masters.
-    when:
-    - g_new_master_hosts | default([]) | length == 0
-    - g_new_node_hosts | default([]) | length == 0
+        Detected no new_masters and/or no new_nodes in inventory. New
+        masters must be part of both new_masters and new_nodes groups.
+        If you are adding just new_nodes, use the
+        playbooks/openshift-node/scaleup.yml play.
+    when: >
+      g_new_master_hosts | default([]) | length == 0
+      or g_new_node_hosts | default([]) | length == 0
 
-# Need a better way to do the above check for node without
-# running evaluate_groups and init/main.yml
-- import_playbook: ../init/main.yml
+- name: Ensure there are new_masters and new_nodes
+  hosts: oo_masters_to_config
+  connection: local
+  gather_facts: no
+  tasks:
+  - fail:
+      # new_masters must be part of new_nodes as well;
+      msg: >
+        Each host in new_masters must also appear in new_nodes
+    when: inventory_hostname not in groups['oo_nodes_to_config']
+
+- import_playbook: ../prerequisites.yml
+  vars:
+    l_scale_up_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+    l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
+
+- import_playbook: ../init/version.yml
+  vars:
+    l_openshift_version_set_hosts: "oo_masters_to_config:oo_nodes_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:oo_nodes_to_config"
 
 - import_playbook: private/scaleup.yml
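The new play against oo_masters_to_config enforces the invariant stated in the fail message: every host in new_masters must also appear in new_nodes. An illustrative inventory fragment (the hostname is hypothetical):

```
[new_masters]
master3.example.com

[new_nodes]
master3.example.com
```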
diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml
index cf13692ae..9cc7263b7 100644
--- a/playbooks/openshift-node/scaleup.yml
+++ b/playbooks/openshift-node/scaleup.yml
@@ -12,9 +12,27 @@
         new_nodes host group to add nodes.
     when:
     - g_new_node_hosts | default([]) | length == 0
+  - fail:
+      msg: >
+        Please run playbooks/openshift-master/scaleup.yml if you need to
+        scale up both masters and nodes. This playbook is only needed if
+        you are only adding new nodes and not new masters.
+    when:
+    - g_new_node_hosts | default([]) | length > 0
+    - g_new_master_hosts | default([]) | length > 0
+
+# if g_new_node_hosts is not empty, oo_nodes_to_config will be set to
+# g_new_node_hosts via evaluate_groups.yml
+
+- import_playbook: ../prerequisites.yml
+  vars:
+    l_scale_up_hosts: "oo_nodes_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+    l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
 
-# Need a better way to do the above check for node without
-# running evaluate_groups and init/main.yml
-- import_playbook: ../init/main.yml
+- import_playbook: ../init/version.yml
+  vars:
+    l_openshift_version_set_hosts: "oo_nodes_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_nodes_to_config"
 
 - import_playbook: private/config.yml
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index a38d7bff7..73c1926a0 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -26,8 +26,8 @@
     - name: Gather facts for the new nodes
       setup:
 
-- name: set common facts
-  import_playbook: ../../init/facts.yml
+- import_playbook: ../../init/basic_facts.yml
+- import_playbook: ../../init/cluster_facts.yml
 
 # TODO(shadower): consider splitting this up so people can stop here
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 7802f83d9..0b76ca862 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -1,18 +1,21 @@
 ---
+# l_scale_up_hosts may be passed in via various scaleup plays.
+
 - import_playbook: init/main.yml
   vars:
     skip_verison: True
+    l_install_base_packages: True
 
 - import_playbook: init/validate_hostnames.yml
   when: not (skip_validate_hostnames | default(False))
 
 - import_playbook: init/repos.yml
 
-- import_playbook: init/base_packages.yml
-
 # This is required for container runtime for crio, only needs to run once.
 - name: Configure os_firewall
-  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
+  hosts: "{{ l_scale_up_hosts | default(l_default_firewall_hosts) }}"
+  vars:
+    l_default_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config"
   roles:
   - role: os_firewall
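With the prerequisites and version plays imported directly by the scale-up playbooks, a node scale-up remains a single invocation (inventory path illustrative):

```
ansible-playbook -i <inventory> playbooks/openshift-node/scaleup.yml
```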