Diffstat (limited to 'playbooks/aws')
-rw-r--r-- | playbooks/aws/ansible-tower/config.yml | 2
-rw-r--r-- | playbooks/aws/ansible-tower/launch.yml | 5
-rw-r--r-- | playbooks/aws/openshift-cluster/add_nodes.yml | 40
-rw-r--r-- | playbooks/aws/openshift-cluster/cluster_hosts.yml | 21
-rw-r--r-- | playbooks/aws/openshift-cluster/config.yml | 32
-rw-r--r-- | playbooks/aws/openshift-cluster/launch.yml | 14
-rw-r--r-- | playbooks/aws/openshift-cluster/list.yml | 4
-rw-r--r-- | playbooks/aws/openshift-cluster/scaleup.yml | 32
-rw-r--r-- | playbooks/aws/openshift-cluster/service.yml | 7
-rw-r--r-- | playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 65
-rw-r--r-- | playbooks/aws/openshift-cluster/templates/user_data.j2 | 11
-rw-r--r-- | playbooks/aws/openshift-cluster/terminate.yml | 42
-rw-r--r-- | playbooks/aws/openshift-cluster/update.yml | 11
-rw-r--r-- | playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 17
-rw-r--r-- | playbooks/aws/openshift-cluster/vars.yml | 32
15 files changed, 236 insertions, 99 deletions
diff --git a/playbooks/aws/ansible-tower/config.yml b/playbooks/aws/ansible-tower/config.yml
index efd1b9911..eb3f1a1da 100644
--- a/playbooks/aws/ansible-tower/config.yml
+++ b/playbooks/aws/ansible-tower/config.yml
@@ -2,6 +2,8 @@
 - name: "populate oo_hosts_to_config host group if needed"
   hosts: localhost
   gather_facts: no
+  connection: local
+  become: no
   tasks:
   - name: Evaluate oo_host_group_exp if it's set
     add_host: "name={{ item }} groups=oo_hosts_to_config"
diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml
index 850238ffb..d40529435 100644
--- a/playbooks/aws/ansible-tower/launch.yml
+++ b/playbooks/aws/ansible-tower/launch.yml
@@ -2,6 +2,7 @@
 - name: Launch instance(s)
   hosts: localhost
   connection: local
+  become: no
   gather_facts: no

   vars:
@@ -71,8 +72,8 @@

     tasks:

-    - name: Yum update
-      yum: name=* state=latest
+    - name: Update All Things
+      action: "{{ ansible_pkg_mgr }} name=* state=latest"

 # Apply the configs, seprate so that just the configs can be run by themselves
 - include: config.yml
diff --git a/playbooks/aws/openshift-cluster/add_nodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml
new file mode 100644
index 000000000..3d88e6b23
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/add_nodes.yml
@@ -0,0 +1,40 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
+  vars:
+    oo_extend_env: True
+  tasks:
+  - fail:
+      msg: Deployment type not supported for aws provider yet
+    when: deployment_type == 'enterprise'
+
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
+    vars:
+      type: "compute"
+      count: "{{ num_nodes }}"
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ node_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+      g_sub_host_type: "{{ sub_host_type }}"
+
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
+    vars:
+      type: "infra"
+      count: "{{ num_infra }}"
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ node_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+      g_sub_host_type: "{{ sub_host_type }}"
+
+- include: scaleup.yml
+- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
new file mode 100644
index 000000000..119b376aa
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml
@@ -0,0 +1,21 @@
+---
+g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
+                 | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
+
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
+
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
+
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
+
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
+
+g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
+
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
+
+g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
+
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
+
+g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index a8e3e27bb..9fba856a2 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -1,23 +1,21 @@
----
-- hosts: localhost
-  gather_facts: no
-  vars_files:
-  - vars.yml
-  tasks:
-  - set_fact:
-      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
-      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
-
 - include: ../../common/openshift-cluster/config.yml
+  vars_files:
+  - ../../aws/openshift-cluster/vars.yml
+  - ../../aws/openshift-cluster/cluster_hosts.yml
   vars:
-    g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
-    g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
-    g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
-    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
-    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 2
+    openshift_debug_level: "{{ debug_level }}"
     openshift_deployment_type: "{{ deployment_type }}"
-    openshift_hostname: "{{ ec2_private_ip_address }}"
     openshift_public_hostname: "{{ ec2_ip_address }}"
+    openshift_registry_selector: 'type=infra'
+    openshift_router_selector: 'type=infra'
+    openshift_infra_nodes: "{{ g_infra_hosts }}"
+    openshift_node_labels: '{"region": "{{ ec2_region }}", "type": "{{ hostvars[inventory_hostname]["ec2_tag_sub-host-type"] if inventory_hostname in groups["tag_host-type_node"] else hostvars[inventory_hostname]["ec2_tag_host-type"] }}"}'
+    openshift_master_cluster_method: 'native'
+    openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
+    os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
+    openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
+    openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index a89275597..15b83dfad 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -2,6 +2,7 @@
 - name: Launch instance(s)
   hosts: localhost
   connection: local
+  become: no
   gather_facts: no
   vars_files:
   - vars.yml
@@ -11,7 +12,7 @@
       msg: Deployment type not supported for aws provider yet
     when: deployment_type == 'enterprise'

-  - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ etcd_names }}"
@@ -19,7 +20,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"

-  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ master_names }}"
@@ -27,7 +28,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"

-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "compute"
       count: "{{ num_nodes }}"
@@ -38,7 +39,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"

-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "infra"
       count: "{{ num_infra }}"
@@ -55,9 +56,4 @@
     when: master_names is defined and master_names.0 is defined

 - include: update.yml
-
-- include: ../../common/openshift-cluster/create_services.yml
-  vars:
-    g_svc_master: "{{ service_master }}"
-
 - include: list.yml
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 04fcdc0a1..8b41a355e 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -2,10 +2,12 @@
 - name: Generate oo_list_hosts group
   hosts: localhost
   gather_facts: no
+  connection: local
+  become: no
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env_{{ cluster_id }}
+  - set_fact: scratch_group=tag_clusterid_{{ cluster_id }}
     when: cluster_id != ''
   - set_fact: scratch_group=all
     when: cluster_id == ''
diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml
new file mode 100644
index 000000000..7e3a47964
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/scaleup.yml
@@ -0,0 +1,32 @@
+---
+
+- hosts: localhost
+  gather_facts: no
+  connection: local
+  become: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: "{{ groups.nodes_to_add }}"
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: ../../common/openshift-cluster/scaleup.yml
+  vars_files:
+  - ../../aws/openshift-cluster/vars.yml
+  - ../../aws/openshift-cluster/cluster_hosts.yml
+  vars:
+    g_new_node_hosts: "{{ groups.nodes_to_add }}"
+    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    g_nodeonmaster: true
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: "{{ debug_level }}"
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml
index 25cf48505..d5f7d6b19 100644
--- a/playbooks/aws/openshift-cluster/service.yml
+++ b/playbooks/aws/openshift-cluster/service.yml
@@ -1,9 +1,12 @@
 ---
 - name: Call same systemctl command for openshift on all instance(s)
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   vars_files:
   - vars.yml
+  - cluster_hosts.yml
   tasks:
   - fail: msg="cluster_id is required to be injected in this playbook"
     when: cluster_id is not defined
@@ -14,7 +17,7 @@
       groups: g_service_masters
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+    with_items: "{{ master_hosts | default([]) }}"

   - name: Evaluate g_service_nodes
     add_host:
@@ -22,7 +25,7 @@
       groups: g_service_nodes
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+    with_items: "{{ node_hosts | default([]) }}"

 - include: ../../common/openshift-node/service.yml
 - include: ../../common/openshift-master/service.yml
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index b77bcdc1a..63be06ecf 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -2,8 +2,8 @@
 - set_fact:
     created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
    docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
-    env: "{{ cluster }}"
-    env_host_type: "{{ cluster }}-openshift-{{ type }}"
+    cluster: "{{ cluster_id }}"
+    env: "{{ cluster_env }}"
     host_type: "{{ type }}"
     sub_host_type: "{{ g_sub_host_type }}"

@@ -20,10 +20,6 @@
               | default(deployment_vars[deployment_type].image, true) }}"
   when: ec2_image is not defined and not ec2_image_name
 - set_fact:
-    ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
-                        | default(deployment_vars[deployment_type].type, true) }}"
-  when: ec2_instance_type is not defined
-- set_fact:
     ec2_keypair: "{{ lookup('env', 'ec2_keypair')
                   | default(deployment_vars[deployment_type].keypair, true) }}"
   when: ec2_keypair is not defined
@@ -37,27 +33,23 @@
   when: ec2_assign_public_ip is not defined

 - set_fact:
-    ec2_instance_type: "{{ ec2_master_instance_type | default(deployment_vars[deployment_type].type, true) }}"
-    ec2_security_groups: "{{ ec2_master_security_groups
-                          | default(deployment_vars[deployment_type].security_groups, true) }}"
+    ec2_instance_type: "{{ ec2_master_instance_type | default(lookup('env', 'ec2_master_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
+    ec2_security_groups: "{{ ec2_master_security_groups | default(lookup('env', 'ec2_master_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
   when: host_type == "master" and sub_host_type == "default"

 - set_fact:
-    ec2_instance_type: "{{ ec2_etcd_instance_type | default(deployment_vars[deployment_type].type, true) }}"
-    ec2_security_groups: "{{ ec2_etcd_security_groups
-                          | default(deployment_vars[deployment_type].security_groups, true)}}"
+    ec2_instance_type: "{{ ec2_etcd_instance_type | default(lookup('env', 'ec2_etcd_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
+    ec2_security_groups: "{{ ec2_etcd_security_groups | default(lookup('env', 'ec2_etcd_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
   when: host_type == "etcd" and sub_host_type == "default"

 - set_fact:
-    ec2_instance_type: "{{ ec2_infra_instance_type | default(deployment_vars[deployment_type].type, true) }}"
-    ec2_security_groups: "{{ ec2_infra_security_groups
-                          | default(deployment_vars[deployment_type].security_groups, true) }}"
+    ec2_instance_type: "{{ ec2_infra_instance_type | default(lookup('env', 'ec2_infra_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
+    ec2_security_groups: "{{ ec2_infra_security_groups | default(lookup('env', 'ec2_infra_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
   when: host_type == "node" and sub_host_type == "infra"

 - set_fact:
-    ec2_instance_type: "{{ ec2_node_instance_type | default(deployment_vars[deployment_type].type, true) }}"
-    ec2_security_groups: "{{ ec2_node_security_groups
-                          | default(deployment_vars[deployment_type].security_groups, true) }}"
+    ec2_instance_type: "{{ ec2_node_instance_type | default(lookup('env', 'ec2_node_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
+    ec2_security_groups: "{{ ec2_node_security_groups | default(lookup('env', 'ec2_node_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
   when: host_type == "node" and sub_host_type == "compute"

 - set_fact:
@@ -65,8 +57,7 @@
                         | default(deployment_vars[deployment_type].type, true) }}"
   when: ec2_instance_type is not defined
 - set_fact:
-    ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
-                          | default(deployment_vars[deployment_type].security_groups, true) }}"
+    ec2_security_groups: "{{ lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
   when: ec2_security_groups is not defined

 - name: Find amis for deployment_type
@@ -81,7 +72,6 @@

 - set_fact:
     latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
-    user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
     volume_defs:
       etcd:
         root:
@@ -97,6 +87,10 @@
           volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
          device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
           iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
+        docker:
+          volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(10, true) }}"
+          device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
       node:
         root:
           volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}"
@@ -121,14 +115,13 @@
     count: "{{ instances | length }}"
     vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
     assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
-    user_data: "{{ user_data }}"
+    user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
     wait: yes
     instance_tags:
       created-by: "{{ created_by }}"
-      environment: "{{ env }}"
-      env: "{{ env }}"
+      clusterid: "{{ cluster }}"
+      environment: "{{ cluster_env }}"
       host-type: "{{ host_type }}"
-      env-host-type: "{{ env_host_type }}"
       sub-host-type: "{{ sub_host_type }}"
     volumes: "{{ volumes }}"
   register: ec2
@@ -143,9 +136,8 @@
       Name: "{{ item.0 }}"

 - set_fact:
-    instance_groups: "tag_created-by_{{ created_by }}, tag_env_{{ env }},
-                      tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }},
-                      tag_sub-host-type_{{ sub_host_type }}"
+    instance_groups: "tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }}, tag_environment_{{ cluster_env }},
+                      tag_host-type_{{ host_type }}, tag_sub-host-type_{{ sub_host_type }}"

 - set_fact:
     node_label:
@@ -172,6 +164,7 @@
     - rotate 7
     - compress
     - sharedscripts
+    - missingok
     scripts:
       postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"

@@ -190,6 +183,22 @@
   - instances
   - ec2.instances

+- name: Add new instances to nodes_to_add group if needed
+  add_host:
+    hostname: "{{ item.0 }}"
+    ansible_ssh_host: "{{ item.1.dns_name }}"
+    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    groups: nodes_to_add
+    ec2_private_ip_address: "{{ item.1.private_ip }}"
+    ec2_ip_address: "{{ item.1.public_ip }}"
+    openshift_node_labels: "{{ node_label }}"
+    logrotate_scripts: "{{ logrotate }}"
+  with_together:
+  - instances
+  - ec2.instances
+  when: oo_extend_env is defined and oo_extend_env | bool
+
 - name: Wait for ssh
   wait_for: "port=22 host={{ item.dns_name }}"
   with_items: ec2.instances
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
index 82c2f4d57..3621a7d7d 100644
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -1,5 +1,5 @@
 #cloud-config
-{% if type =='etcd' %}
+{% if type == 'etcd' and 'etcd' in volume_defs[type] %}
 cloud_config_modules:
 - disk_setup
 - mounts
@@ -19,7 +19,7 @@ fs_setup:
     partition: auto
 {% endif %}

-{% if type == 'node' %}
+{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
 mounts:
 - [ xvdb ]
 - [ ephemeral0 ]
@@ -43,3 +43,10 @@ growpart:
 runcmd:
 - xfs_growfs /var
 {% endif %}
+
+{% if deployment_vars[deployment_type].sudo %}
+- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
+  permissions: 440
+  content: |
+    Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty
+{% endif %}
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 77287cad0..6dd5d8b62 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -1,23 +1,24 @@
 ---
 - name: Terminate instance(s)
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env_{{ cluster_id }}
   - add_host:
       name: "{{ item }}"
       groups: oo_hosts_to_terminate
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+    with_items: (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost'])

 - name: Unsubscribe VMs
   hosts: oo_hosts_to_terminate
   roles:
   - role: rhel_unsubscribe
-    when: deployment_type == "enterprise" and
+    when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
           ansible_distribution == "RedHat" and
           lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
           default('no', True) | lower in ['no', 'false']
@@ -25,36 +26,37 @@
 - name: Terminate instances
   hosts: localhost
   connection: local
+  become: no
   gather_facts: no
-  vars:
-    host_vars: "{{ hostvars
-                   | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
   tasks:
     - name: Remove tags from instances
-      ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
-      args:
+      ec2_tag:
+        resource: "{{ hostvars[item]['ec2_id'] }}"
+        region: "{{ hostvars[item]['ec2_region'] }}"
+        state: absent
         tags:
-          env: "{{ item['ec2_tag_env'] }}"
-          host-type: "{{ item['ec2_tag_host-type'] }}"
-          env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
-          sub_host_type: "{{ item['ec2_tag_sub-host-type'] }}"
-      with_items: host_vars
+          environment: "{{ hostvars[item]['ec2_tag_environment'] }}"
+          clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
+          host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
+          sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
+      with_items: groups.oo_hosts_to_terminate
       when: "'oo_hosts_to_terminate' in groups"

     - name: Terminate instances
       ec2:
         state: absent
-        instance_ids: ["{{ item.ec2_id }}"]
-        region: "{{ item.ec2_region }}"
+        instance_ids: ["{{ hostvars[item].ec2_id }}"]
+        region: "{{ hostvars[item].ec2_region }}"
       ignore_errors: yes
       register: ec2_term
-      with_items: host_vars
+      with_items: groups.oo_hosts_to_terminate
       when: "'oo_hosts_to_terminate' in groups"

     # Fail if any of the instances failed to terminate with an error other
     # than 403 Forbidden
-    - fail: msg=Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}
-      when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+    - fail:
+        msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
+      when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
       with_items: ec2_term.results

     - name: Stop instance if termination failed
@@ -63,7 +65,7 @@
         instance_ids: ["{{ item.item.ec2_id }}"]
         region: "{{ item.item.ec2_region }}"
       register: ec2_stop
-      when: "'oo_hosts_to_terminate' in groups and item.failed"
+      when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
       with_items: ec2_term.results

     - name: Rename stopped instances
@@ -72,4 +74,4 @@
       tags:
         Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
       with_items: ec2_stop.results
-      when: "'oo_hosts_to_terminate' in groups"
+      when: ec2_stop | changed
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index e006aa74a..32bab76b5 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -1,19 +1,20 @@
 ---
-- name: Populate oo_hosts_to_update group
+- name: Update - Populate oo_hosts_to_update group
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   vars_files:
   - vars.yml
+  - cluster_hosts.yml
   tasks:
-  - name: Evaluate oo_hosts_to_update
+  - name: Update - Evaluate oo_hosts_to_update
     add_host:
       name: "{{ item }}"
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]))
-                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]))
-                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([]))
+    with_items: "{{ g_all_hosts | default([]) }}"

 - include: ../../common/openshift-cluster/update_repos_and_packages.yml

diff --git a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
new file mode 100644
index 000000000..11026e38d
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -0,0 +1,17 @@
+---
+# This playbook upgrades an existing AWS cluster, leaving nodes untouched if used with an 'online' deployment type.
+# Usage:
+#  ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=online -e cluster_id=<cluster_id>
+- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+  vars_files:
+  - "{{lookup('file', '../../../../aws/openshift-cluster/vars.yml')}}"
+  - "{{lookup('file', '../../../../aws/openshift-cluster/cluster_hosts.yml')}}"
+  vars:
+    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    g_nodeonmaster: true
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: "{{ debug_level }}"
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ ec2_private_ip_address }}"
+    openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index 95bc4b3e2..ae12286bd 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1,8 +1,23 @@
 ---
+debug_level: 2
+
+deployment_rhel7_ent_base:
+  # rhel-7.1, requires cloud access subscription
+  image: ami-10663b78
+  image_name:
+  region: us-east-1
+  ssh_user: ec2-user
+  sudo: yes
+  keypair: libra
+  type: m4.large
+  security_groups: [ 'public' ]
+  vpc_subnet:
+  assign_public_ip:
+
 deployment_vars:
   origin:
     # centos-7, requires marketplace
-    image: ami-96a818fe
+    image: ami-61bbf104
     image_name:
     region: us-east-1
     ssh_user: centos
@@ -24,15 +39,6 @@ deployment_vars:
     security_groups: [ 'public' ]
     vpc_subnet:
     assign_public_ip:
-  enterprise:
-    # rhel-7.1, requires cloud access subscription
-    image: ami-10663b78
-    image_name:
-    region: us-east-1
-    ssh_user: ec2-user
-    sudo: yes
-    keypair: libra
-    type: m4.large
-    security_groups: [ 'public' ]
-    vpc_subnet:
-    assign_public_ip:
+  enterprise: "{{ deployment_rhel7_ent_base }}"
+  openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+  atomic-enterprise: "{{ deployment_rhel7_ent_base }}"