Diffstat (limited to 'roles/openshift_aws')
-rw-r--r--  roles/openshift_aws/README.md | 6
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 207
-rw-r--r--  roles/openshift_aws/filter_plugins/openshift_aws_filters.py | 41
-rw-r--r--  roles/openshift_aws/tasks/accept_nodes.yml | 15
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 32
-rw-r--r--  roles/openshift_aws/tasks/elb.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/iam_role.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 37
-rw-r--r--  roles/openshift_aws/tasks/launch_config_create.yml | 26
-rw-r--r--  roles/openshift_aws/tasks/master_facts.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 26
-rw-r--r--  roles/openshift_aws/tasks/provision_elb.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml | 10
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 34
-rw-r--r--  roles/openshift_aws/tasks/remove_scale_group.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/s3.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 36
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 6
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml | 30
-rw-r--r--  roles/openshift_aws/tasks/security_group_create.yml | 25
-rw-r--r--  roles/openshift_aws/tasks/setup_master_group.yml | 6
-rw-r--r--  roles/openshift_aws/tasks/setup_scale_group_facts.yml | 22
-rw-r--r--  roles/openshift_aws/tasks/uninstall_security_group.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/uninstall_ssh_keys.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/uninstall_vpc.yml | 36
-rw-r--r--  roles/openshift_aws/tasks/upgrade_node_group.yml | 18
-rw-r--r--  roles/openshift_aws/tasks/wait_for_groups.yml | 23
-rw-r--r--  roles/openshift_aws/templates/user_data.j2 | 6
28 files changed, 395 insertions, 325 deletions
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
index 4aca5c7a8..de73ab01d 100644
--- a/roles/openshift_aws/README.md
+++ b/roles/openshift_aws/README.md
@@ -7,9 +7,9 @@ This role contains many task-areas to provision resources and perform actions
against an AWS account for the purposes of dynamically building an openshift
cluster.
-This role is primarily intended to be used with "include_role" and "tasks_from".
+This role is primarily intended to be used with "import_role" and "tasks_from".
-include_role can be called from the tasks section in a play. See example
+import_role can be called from the tasks section in a play. See example
playbook below for reference.
These task-areas are:
@@ -40,7 +40,7 @@ Example Playbook
----------------
```yaml
-- include_role:
+- import_role:
name: openshift_aws
tasks_from: vpc.yml
vars:
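The README hunk above swaps the dynamic `include_role` for the static `import_role`, which is resolved at playbook parse time. A minimal sketch of how the updated example would sit inside a full play (the host target and variable values here are assumptions for illustration, not part of the patch):

```yaml
- hosts: localhost
  connection: local
  tasks:
  # static import: resolved at parse time, unlike include_role
  - import_role:
      name: openshift_aws
      tasks_from: vpc.yml
    vars:
      openshift_aws_clusterid: mycluster
      openshift_aws_region: us-east-1
```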
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 42ef22846..178e0849c 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -6,9 +6,7 @@ openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
-openshift_aws_current_version: ''
-openshift_aws_new_version: ''
-
+openshift_aws_node_group_upgrade: False
openshift_aws_wait_for_ssh: True
openshift_aws_clusterid: default
@@ -19,7 +17,6 @@ openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
openshift_aws_iam_cert_key_path: ''
-openshift_aws_scale_group_basename: "{{ openshift_aws_clusterid }} openshift"
openshift_aws_iam_role_name: openshift_node_describe_instances
openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}"
@@ -34,86 +31,110 @@ openshift_aws_ami_name: openshift-gi
openshift_aws_base_ami_name: ami_base
openshift_aws_launch_config_bootstrap_token: ''
-openshift_aws_launch_config_basename: "{{ openshift_aws_clusterid }}"
openshift_aws_users: []
openshift_aws_ami_tags:
bootstrap: "true"
openshift-created: "true"
- clusterid: "{{ openshift_aws_clusterid }}"
parent: "{{ openshift_aws_base_ami | default('unknown') }}"
openshift_aws_s3_mode: create
openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
-openshift_aws_elb_health_check:
- ping_protocol: tcp
- ping_port: 443
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 2
-
openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}"
-openshift_aws_elb_name_dict:
- master:
- external: "{{ openshift_aws_elb_basename }}-master-external"
- internal: "{{ openshift_aws_elb_basename }}-master-internal"
- infra:
- external: "{{ openshift_aws_elb_basename }}-infra"
-
-openshift_aws_elb_idle_timout: 400
-openshift_aws_elb_scheme: internet-facing
-openshift_aws_elb_cert_arn: ''
openshift_aws_elb_dict:
master:
external:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: ssl
- instance_port: 443
- - protocol: ssl
- load_balancer_port: 443
- instance_protocol: ssl
- instance_port: 443
- # ssl certificate required for https or ssl
- ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+ cross_az_load_balancing: False
+ health_check:
+ ping_protocol: tcp
+ ping_port: "{{ openshift_master_api_port | default(8443) }}"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ idle_timout: 400
+ listeners:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: ssl
+ instance_port: "{{ openshift_master_api_port | default(8443) }}"
+ - protocol: ssl
+ load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
+ instance_protocol: ssl
+ instance_port: "{{ openshift_master_api_port | default(8443) }}"
+ ssl_certificate_id: ''
+ name: "{{ openshift_aws_elb_basename }}-master-external"
+ tags: "{{ openshift_aws_kube_tags }}"
internal:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: tcp
- instance_port: 80
- - protocol: tcp
- load_balancer_port: 443
- instance_protocol: tcp
- instance_port: 443
+ cross_az_load_balancing: False
+ health_check:
+ ping_protocol: tcp
+ ping_port: "{{ openshift_master_api_port | default(8443) }}"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ idle_timout: 400
+ listeners:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 80
+ - protocol: tcp
+ load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
+ instance_protocol: tcp
+ instance_port: "{{ openshift_master_api_port | default(8443) }}"
+ name: "{{ openshift_aws_elb_basename }}-master-internal"
+ tags: "{{ openshift_aws_kube_tags }}"
infra:
external:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: tcp
- instance_port: 443
- proxy_protocol: True
- - protocol: tcp
- load_balancer_port: 443
- instance_protocol: tcp
- instance_port: 443
- proxy_protocol: True
+ cross_az_load_balancing: False
+ health_check:
+ ping_protocol: tcp
+ ping_port: 443
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ idle_timout: 400
+ listeners:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 443
+ proxy_protocol: True
+ - protocol: tcp
+ load_balancer_port: 443
+ instance_protocol: tcp
+ instance_port: 443
+ proxy_protocol: True
+ name: "{{ openshift_aws_elb_basename }}-infra"
+ tags: "{{ openshift_aws_kube_tags }}"
openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: False
openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: True
+# build_instance_tags is a custom filter in role lib_utils
openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_node_group_termination_policy: Default
openshift_aws_node_group_replace_instances: []
@@ -124,6 +145,33 @@ openshift_aws_ami_map:
infra: "{{ openshift_aws_ami }}"
compute: "{{ openshift_aws_ami }}"
+openshift_aws_master_group:
+- name: "{{ openshift_aws_clusterid }} master group"
+ group: master
+ tags:
+ host-type: master
+ sub-host-type: default
+ runtime: docker
+
+openshift_aws_node_groups:
+- name: "{{ openshift_aws_clusterid }} compute group"
+ group: compute
+ tags:
+ host-type: node
+ sub-host-type: compute
+ runtime: docker
+
+- name: "{{ openshift_aws_clusterid }} infra group"
+ group: infra
+ tags:
+ host-type: node
+ sub-host-type: infra
+ runtime: docker
+
+openshift_aws_created_asgs: []
+openshift_aws_current_asgs: []
+
+# these will be used during upgrade
openshift_aws_master_group_config:
# The 'master' key is always required here.
master:
@@ -135,18 +183,13 @@ openshift_aws_master_group_config:
min_size: 3
max_size: 3
desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- runtime: docker
- version: "{{ openshift_aws_new_version }}"
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
policy_name: "{{ openshift_aws_iam_role_policy_name }}"
policy_json: "{{ openshift_aws_iam_role_policy_json }}"
- elbs: "{{ openshift_aws_elb_name_dict['master'].keys()| map('extract', openshift_aws_elb_name_dict['master']) | list }}"
+ elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"
openshift_aws_node_group_config:
# The 'compute' key is always required here.
@@ -159,11 +202,6 @@ openshift_aws_node_group_config:
min_size: 3
max_size: 100
desired_size: 3
- tags:
- host-type: node
- sub-host-type: compute
- runtime: docker
- version: "{{ openshift_aws_new_version }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -179,21 +217,14 @@ openshift_aws_node_group_config:
min_size: 2
max_size: 20
desired_size: 2
- tags:
- host-type: node
- sub-host-type: infra
- runtime: docker
- version: "{{ openshift_aws_new_version }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
policy_name: "{{ openshift_aws_iam_role_policy_name }}"
policy_json: "{{ openshift_aws_iam_role_policy_json }}"
- elbs: "{{ openshift_aws_elb_name_dict['infra'].keys()| map('extract', openshift_aws_elb_name_dict['infra']) | list }}"
-
-openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}"
-openshift_aws_elb_az_load_balancing: False
+ elbs: "{{ openshift_aws_elb_dict | json_query('infra.[*][0][*].name') }}"
+# build_instance_tags is a custom filter in role lib_utils
openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
@@ -236,8 +267,8 @@ openshift_aws_node_security_groups:
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
- from_port: 443
- to_port: 443
+ from_port: "{{ openshift_master_api_port | default(8443) }}"
+ to_port: "{{ openshift_master_api_port | default(8443) }}"
cidr_ip: 0.0.0.0/0
compute:
name: "{{ openshift_aws_clusterid }}_compute"
@@ -251,8 +282,8 @@ openshift_aws_node_security_groups:
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
- from_port: 443
- to_port: 443
+ from_port: "{{ openshift_master_api_port | default(8443) }}"
+ to_port: "{{ openshift_master_api_port | default(8443) }}"
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 30000
@@ -265,8 +296,6 @@ openshift_aws_node_security_groups:
openshift_aws_vpc_tags:
Name: "{{ openshift_aws_vpc_name }}"
-openshift_aws_subnet_az: us-east-1c
-
openshift_aws_vpc:
name: "{{ openshift_aws_vpc_name }}"
cidr: 172.31.0.0/16
@@ -274,30 +303,20 @@ openshift_aws_vpc:
us-east-1:
- cidr: 172.31.48.0/20
az: "us-east-1c"
+ default_az: true
- cidr: 172.31.32.0/20
az: "us-east-1e"
- cidr: 172.31.16.0/20
az: "us-east-1a"
+openshift_aws_subnet_az: "{{ openshift_aws_vpc.subnets[openshift_aws_region] | get_default_az }}"
+
openshift_aws_node_run_bootstrap_startup: True
openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
-openshift_aws_node_groups: nodes
-
openshift_aws_masters_groups: masters,etcd,nodes
-# If creating extra node groups, you'll need to define all of the following
-
-# The format is the same as openshift_aws_node_group_config, but the top-level
-# key names should be different (ie, not == master or infra).
-# openshift_aws_node_group_config_extra: {}
-
-# This variable should look like openshift_aws_launch_config_security_groups
-# and contain a one-to-one mapping of top level keys that are defined in
-# openshift_aws_node_group_config_extra.
-# openshift_aws_launch_config_security_groups_extra: {}
-
-# openshift_aws_node_security_groups_extra: {}
-
-# openshift_aws_ami_map_extra: {}
+# By default, don't delete things like the shared IAM instance
+# profile and uploaded ssh keys
+openshift_aws_enable_uninstall_shared_objects: False
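The defaults rework above folds the per-ELB health check, idle timeout, scheme, tags, and names into a single `openshift_aws_elb_dict`, and the scale-group configs now read the ELB names out of that dict with `json_query` instead of the removed `openshift_aws_elb_name_dict`. A sketch of what the new lookup evaluates to, assuming the defaults above with `openshift_aws_clusterid` left at `default`:

```yaml
- name: show the ELB names wired to the master scale group
  debug:
    msg: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"
  # with the defaults above this should yield something like:
  #   ["default-master-external", "default-master-internal"]
```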
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
deleted file mode 100644
index e707abd3f..000000000
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use in openshift_aws
-'''
-
-
-class FilterModule(object):
- ''' Custom ansible filters for use by openshift_aws role'''
-
- @staticmethod
- def scale_groups_match_capacity(scale_group_info):
- ''' This function will verify that the scale group instance count matches
- the scale group desired capacity
-
- '''
- for scale_group in scale_group_info:
- if scale_group['desired_capacity'] != len(scale_group['instances']):
- return False
-
- return True
-
- @staticmethod
- def build_instance_tags(clusterid):
- ''' This function will return a dictionary of the instance tags.
-
- The main desire to have this inside of a filter_plugin is that we
- need to build the following key.
-
- {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"}
-
- '''
- tags = {'clusterid': clusterid,
- 'kubernetes.io/cluster/{}'.format(clusterid): clusterid}
-
- return tags
-
- def filters(self):
- ''' returns a mapping of filters to methods '''
- return {'build_instance_tags': self.build_instance_tags,
- 'scale_groups_match_capacity': self.scale_groups_match_capacity}
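The role-local filter plugin is deleted here; per the comments added in `defaults/main.yml`, `build_instance_tags` and the scale-group filters now come from the `lib_utils` role instead. Based on the deleted implementation above, the filter still turns a clusterid into the tag dictionary used throughout the defaults (for example `openshift_aws_kube_tags`):

```yaml
- name: show the tags built for clusterid "default"
  debug:
    msg: "{{ 'default' | build_instance_tags }}"
  # => {"clusterid": "default", "kubernetes.io/cluster/default": "default"}
```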
diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml
index ae320962f..db30fe5c9 100644
--- a/roles/openshift_aws/tasks/accept_nodes.yml
+++ b/roles/openshift_aws/tasks/accept_nodes.yml
@@ -1,6 +1,8 @@
---
+- include_tasks: setup_master_group.yml
+
- name: fetch masters
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
@@ -11,7 +13,7 @@
until: "'instances' in mastersout and mastersout.instances|length > 0"
- name: fetch new node instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
@@ -22,13 +24,18 @@
delay: 3
until: "'instances' in instancesout and instancesout.instances|length > 0"
-- debug:
+- name: Dump the private dns names
+ debug:
msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
+- name: Dump the master public ip address
+ debug:
+ msg: "{{ mastersout.instances[0].public_ip_address }}"
+
- name: approve nodes
oc_adm_csr:
#approve_all: True
nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
timeout: 60
register: nodeout
- delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
+ delegate_to: "{{ groups.masters.0 }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 2c1e88cfb..a9f9cc3c4 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -1,6 +1,4 @@
---
-# This task file expects l_nodes_to_build to be passed in.
-
# When openshift_aws_use_custom_ami is '' then
# we retrieve the latest build AMI.
# Then set openshift_aws_ami to the ami.
@@ -26,6 +24,36 @@
# Need to set epoch time in one place to use for launch_config and scale_group
- set_fact:
l_epoch_time: "{{ ansible_date_time.epoch }}"
+#
+# query asg's and determine if we need to create the others.
+# if we find more than 1 for each type, then exit
+- name: query all asg's for this cluster
+ ec2_asg_facts:
+ region: "{{ openshift_aws_region }}"
+ tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(openshift_aws_node_group.tags) }}"
+ register: asgs
+
+- fail:
+ msg: "Found more than 1 auto scaling group that matches the query for group: {{ openshift_aws_node_group }}"
+ when:
+ - asgs.results|length > 1
+
+- debug:
+ msg: "{{ asgs }}"
+
+- name: set the value for the deployment_serial and the current asgs
+ set_fact:
+ # scale_groups_serial is a custom filter in role lib_utils
+ l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
+ openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}"
+
+- name: dump deployment serial
+ debug:
+ msg: "Deployment serial: {{ l_deployment_serial }}"
+
+- name: dump current_asgs
+ debug:
+ msg: "openshift_aws_current_asgs: {{ openshift_aws_current_asgs }}"
- when: openshift_aws_create_iam_role
include_tasks: iam_role.yml
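`build_node_group.yml` now looks up any existing auto scaling groups for the node group before building, using the cluster tag combined with the group's own tags, and fails if the query matches more than one group. For the default compute group the effective tag filter looks roughly like this (clusterid assumed to be `default`):

```yaml
- name: show the ASG tag filter used for the compute group
  debug:
    msg: "{{ {'kubernetes.io/cluster/default': 'default'} | combine({'host-type': 'node', 'sub-host-type': 'compute', 'runtime': 'docker'}) }}"
  # => {"kubernetes.io/cluster/default": "default", "host-type": "node",
  #     "sub-host-type": "compute", "runtime": "docker"}
```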
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index 5d371ec7a..d8257cf31 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -5,18 +5,18 @@
- name: "Create ELB {{ l_elb_dict_item.key }}"
ec2_elb_lb:
- name: "{{ l_openshift_aws_elb_name_dict[l_elb_dict_item.key][item.key] }}"
+ name: "{{ item.value.name }}"
state: present
- cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}"
+ cross_az_load_balancing: "{{ item.value.cross_az_load_balancing }}"
security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}"
- idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
+ idle_timeout: "{{ item.value.idle_timout }}"
region: "{{ openshift_aws_region }}"
subnets:
- "{{ subnetout.subnets[0].id }}"
- health_check: "{{ openshift_aws_elb_health_check }}"
- listeners: "{{ item.value }}"
- scheme: "{{ openshift_aws_elb_scheme }}"
- tags: "{{ openshift_aws_elb_tags }}"
+ health_check: "{{ item.value.health_check }}"
+ listeners: "{{ item.value.listeners }}"
+ scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}"
+ tags: "{{ item.value.tags }}"
wait: True
register: new_elb
with_dict: "{{ l_elb_dict_item.value }}"
diff --git a/roles/openshift_aws/tasks/iam_role.yml b/roles/openshift_aws/tasks/iam_role.yml
index d9910d938..cf3bb28fb 100644
--- a/roles/openshift_aws/tasks/iam_role.yml
+++ b/roles/openshift_aws/tasks/iam_role.yml
@@ -13,11 +13,10 @@
#####
- name: Create an iam role
iam_role:
- name: "{{ item.value.iam_role }}"
+ name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
assume_role_policy_document: "{{ lookup('file','trustpolicy.json') }}"
state: "{{ openshift_aws_iam_role_state | default('present') }}"
- when: item.value.iam_role is defined
- with_dict: "{{ l_nodes_to_build }}"
+ when: l_node_group_config[openshift_aws_node_group.group].iam_role is defined
#####
# The second part of this task file is linking the role to a policy
@@ -28,9 +27,8 @@
- name: create an iam policy
iam_policy:
iam_type: role
- iam_name: "{{ item.value.iam_role }}"
- policy_json: "{{ item.value.policy_json }}"
- policy_name: "{{ item.value.policy_name }}"
+ iam_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
+ policy_json: "{{ l_node_group_config[openshift_aws_node_group.group].policy_json }}"
+ policy_name: "{{ l_node_group_config[openshift_aws_node_group.group].policy_name }}"
state: "{{ openshift_aws_iam_role_state | default('present') }}"
- when: item.value.iam_role is defined
- with_dict: "{{ l_nodes_to_build }}"
+ when: "'iam_role' in l_node_group_config[openshift_aws_node_group.group]"
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index fed80b7eb..6f78ee00a 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -1,15 +1,26 @@
---
-- fail:
- msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
- when:
- - openshift_aws_ami is undefined
+- name: fetch the security groups for launch config
+ ec2_group_facts:
+ filters:
+ group-name: "{{ openshift_aws_launch_config_security_groups[openshift_aws_node_group.group] }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ region: "{{ openshift_aws_region }}"
+ register: ec2sgs
-- fail:
- msg: "Ensure that openshift_deployment_type is defined."
- when:
- - openshift_deployment_type is undefined
-
-- include_tasks: launch_config_create.yml
- with_dict: "{{ l_nodes_to_build }}"
- loop_control:
- loop_var: launch_config_item
+# Create the scale group config
+- name: Create the node scale group launch config
+ ec2_lc:
+ name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}"
+ region: "{{ openshift_aws_region }}"
+ image_id: "{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}"
+ instance_type: "{{ l_node_group_config[openshift_aws_node_group.group].instance_type }}"
+ security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
+ instance_profile_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role if l_node_group_config[openshift_aws_node_group.group].iam_role is defined and
+ l_node_group_config[openshift_aws_node_group.group].iam_role != '' and
+ openshift_aws_create_iam_role
+ else omit }}"
+ user_data: "{{ lookup('template', 'user_data.j2') }}"
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ ebs_optimized: False
+ volumes: "{{ l_node_group_config[openshift_aws_node_group.group].volumes }}"
+ assign_public_ip: True
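With `launch_config_create.yml` folded back into `launch_config.yml` (see the deletion below), the launch configuration name is now built from the node group name, the resolved AMI, and the shared `l_epoch_time` fact, which is what lets `scale_group.yml` reconstruct the same name. Purely for illustration (the AMI id and epoch below are placeholders):

```yaml
- set_fact:
    l_example_lc_name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}"
  # e.g. "default compute group-ami-0a1b2c3d-1518000000"
```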
diff --git a/roles/openshift_aws/tasks/launch_config_create.yml b/roles/openshift_aws/tasks/launch_config_create.yml
deleted file mode 100644
index f7f0f0953..000000000
--- a/roles/openshift_aws/tasks/launch_config_create.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: fetch the security groups for launch config
- ec2_group_facts:
- filters:
- group-name: "{{ l_launch_config_security_groups[launch_config_item.key] }}"
- vpc-id: "{{ vpcout.vpcs[0].id }}"
- region: "{{ openshift_aws_region }}"
- register: ec2sgs
-
-# Create the scale group config
-- name: Create the node scale group launch config
- ec2_lc:
- name: "{{ openshift_aws_launch_config_basename }}-{{ launch_config_item.key }}{{'-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}"
- region: "{{ openshift_aws_region }}"
- image_id: "{{ l_aws_ami_map[launch_config_item.key] | default(openshift_aws_ami) }}"
- instance_type: "{{ launch_config_item.value.instance_type }}"
- security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
- instance_profile_name: "{{ launch_config_item.value.iam_role if launch_config_item.value.iam_role is defined and
- launch_config_item.value.iam_role != '' and
- openshift_aws_create_iam_role
- else omit }}"
- user_data: "{{ lookup('template', 'user_data.j2') }}"
- key_name: "{{ openshift_aws_ssh_key_name }}"
- ebs_optimized: False
- volumes: "{{ launch_config_item.value.volumes }}"
- assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml
index 530b0134d..c2e362acd 100644
--- a/roles/openshift_aws/tasks/master_facts.yml
+++ b/roles/openshift_aws/tasks/master_facts.yml
@@ -3,7 +3,7 @@
ec2_elb_facts:
region: "{{ openshift_aws_region }}"
names:
- - "{{ openshift_aws_elb_name_dict['master']['internal'] }}"
+ - "{{ openshift_aws_elb_dict['master']['internal']['name'] }}"
delegate_to: localhost
register: elbs
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 06f649343..2b5f317d8 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,32 +1,16 @@
---
-- when: openshift_aws_create_iam_cert | bool
- name: create the iam_cert for elb certificate
- include_tasks: iam_cert.yml
-
-- when: openshift_aws_create_s3 | bool
- name: create s3 bucket for registry
- include_tasks: s3.yml
-
- include_tasks: vpc_and_subnet_id.yml
-- name: create elbs
- include_tasks: elb.yml
- with_dict: "{{ openshift_aws_elb_dict }}"
- vars:
- l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
- l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
- loop_control:
- loop_var: l_elb_dict_item
-
- name: include scale group creation for master
include_tasks: build_node_group.yml
+ with_items: "{{ openshift_aws_master_group }}"
vars:
- l_nodes_to_build: "{{ openshift_aws_master_group_config }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map }}"
+ l_node_group_config: "{{ openshift_aws_master_group_config }}"
+ loop_control:
+ loop_var: openshift_aws_node_group
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml
new file mode 100644
index 000000000..fcc49c3ea
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_elb.yml
@@ -0,0 +1,14 @@
+---
+- when: openshift_aws_create_iam_cert | bool
+ name: create the iam_cert for elb certificate
+ include_tasks: iam_cert.yml
+
+- include_tasks: vpc_and_subnet_id.yml
+
+- name: create elbs
+ include_tasks: elb.yml
+ with_dict: "{{ openshift_aws_elb_dict }}"
+ vars:
+ l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
+ loop_control:
+ loop_var: l_elb_dict_item
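The new `provision_elb.yml` splits the IAM certificate, VPC/subnet lookup, and ELB creation out of `provision.yml` so load balancers can be provisioned on their own. A sketch of driving it directly (the play target and variable values are assumptions):

```yaml
- hosts: localhost
  connection: local
  tasks:
  - import_role:
      name: openshift_aws
      tasks_from: provision_elb.yml
    vars:
      openshift_aws_clusterid: mycluster
      openshift_aws_region: us-east-1
```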
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 8cc75cd0c..786db1570 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -14,11 +14,7 @@
instance_type: m4.xlarge
vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}"
image: "{{ openshift_aws_base_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_type: gp2
- volume_size: 100
- delete_on_termination: true
+ volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
wait: yes
exact_count: 1
count_tag:
@@ -27,7 +23,7 @@
Name: "{{ openshift_aws_base_ami_name }}"
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:Name": "{{ openshift_aws_base_ami_name }}"
@@ -46,5 +42,5 @@
- name: add host to nodes
add_host:
- groups: nodes
+ groups: nodes,g_new_node_hosts
name: "{{ instancesout.instances[0].public_dns_name }}"
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index 041ed0791..9105b5b4c 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -2,25 +2,12 @@
# Get bootstrap config token
# bootstrap should be created on first master
# need to fetch it and shove it into cloud data
-- name: fetch master instances
- ec2_remote_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until:
- - "'instances' in instancesout"
- - instancesout.instances|length > 0
+- include_tasks: setup_master_group.yml
- name: slurp down the bootstrap.kubeconfig
slurp:
src: /etc/origin/master/bootstrap.kubeconfig
- delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
- remote_user: root
+ delegate_to: "{{ groups.masters.0 }}"
register: bootstrap
- name: set_fact for kubeconfig token
@@ -31,20 +18,15 @@
- name: include build compute and infra node groups
include_tasks: build_node_group.yml
+ with_items: "{{ openshift_aws_node_groups }}"
vars:
- l_nodes_to_build: "{{ openshift_aws_node_group_config }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map }}"
-
-- name: include build node group for extra nodes
- include_tasks: build_node_group.yml
- when: openshift_aws_node_group_config_extra is defined
- vars:
- l_nodes_to_build: "{{ openshift_aws_node_group_config_extra | default({}) }}"
- l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups_extra }}"
- l_aws_ami_map: "{{ openshift_aws_ami_map_extra }}"
+ l_node_group_config: "{{ openshift_aws_node_group_config }}"
+ loop_control:
+ loop_var: openshift_aws_node_group
# instances aren't scaling fast enough here, we need to wait for them
- when: openshift_aws_wait_for_ssh | bool
name: wait for our new nodes to come up
include_tasks: wait_for_groups.yml
+ vars:
+ created_asgs: "{{ openshift_aws_created_asgs }}"
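With the `*_extra` variables removed from the defaults, compute and infra groups are now driven by the `openshift_aws_node_groups` list together with the shared `openshift_aws_node_group_config`. A sketch of how an additional group would be declared under the new scheme (the `gpu` group name and tags are invented for illustration and would also need a matching `gpu` key in `openshift_aws_node_group_config`):

```yaml
openshift_aws_node_groups:
- name: "{{ openshift_aws_clusterid }} compute group"
  group: compute
  tags:
    host-type: node
    sub-host-type: compute
    runtime: docker
- name: "{{ openshift_aws_clusterid }} infra group"
  group: infra
  tags:
    host-type: node
    sub-host-type: infra
    runtime: docker
# invented extra group, for illustration only
- name: "{{ openshift_aws_clusterid }} gpu group"
  group: gpu
  tags:
    host-type: node
    sub-host-type: gpu
    runtime: docker
```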
diff --git a/roles/openshift_aws/tasks/remove_scale_group.yml b/roles/openshift_aws/tasks/remove_scale_group.yml
index 55d1af2b5..a01cde294 100644
--- a/roles/openshift_aws/tasks/remove_scale_group.yml
+++ b/roles/openshift_aws/tasks/remove_scale_group.yml
@@ -1,10 +1,13 @@
---
+# FIGURE OUT HOW TO REMOVE SCALE GROUPS
+# use openshift_aws_current_asgs??
- name: fetch the scale groups
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
+ name: "^{{ item }}$"
tags:
- "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
- 'version': openshift_aws_current_version} }}"
+ "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}"
+ with_items: "{{ openshift_aws_current_asgs if openshift_aws_current_asgs != [] else openshift_aws_asgs_to_remove }}"
register: qasg
- name: remove non-master scale groups
@@ -14,7 +17,7 @@
name: "{{ item.auto_scaling_group_name }}"
when: "'master' not in item.auto_scaling_group_name"
register: asg_results
- with_items: "{{ qasg.results }}"
+ with_items: "{{ qasg | json_query('results[*]') | sum(attribute='results', start=[]) }}"
async: 600
poll: 0
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
index 9cf37c840..ba70bcff6 100644
--- a/roles/openshift_aws/tasks/s3.yml
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -1,6 +1,6 @@
---
- name: Create an s3 bucket
- s3:
+ aws_s3:
bucket: "{{ openshift_aws_s3_bucket_name }}"
mode: "{{ openshift_aws_s3_mode }}"
region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index 30df7545d..6ce8c58ba 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -1,20 +1,30 @@
---
+- name: set node group name
+ set_fact:
+ l_node_group_name: "{{ openshift_aws_node_group.name }} {{ l_deployment_serial }}"
+
- name: Create the scale group
ec2_asg:
- name: "{{ openshift_aws_scale_group_basename }} {{ item.key }}"
- launch_config_name: "{{ openshift_aws_launch_config_basename }}-{{ item.key }}{{ '-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}"
- health_check_period: "{{ item.value.health_check.period }}"
- health_check_type: "{{ item.value.health_check.type }}"
- min_size: "{{ item.value.min_size }}"
- max_size: "{{ item.value.max_size }}"
- desired_capacity: "{{ item.value.desired_size }}"
+ name: "{{ l_node_group_name }}"
+ launch_config_name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}"
+ health_check_period: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.period }}"
+ health_check_type: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.type }}"
+ min_size: "{{ l_node_group_config[openshift_aws_node_group.group].min_size }}"
+ max_size: "{{ l_node_group_config[openshift_aws_node_group.group].max_size }}"
+ desired_capacity: "{{ l_node_group_config[openshift_aws_node_group.group].desired_size }}"
region: "{{ openshift_aws_region }}"
- termination_policies: "{{ item.value.termination_policy if 'termination_policy' in item.value else omit }}"
- load_balancers: "{{ item.value.elbs if 'elbs' in item.value else omit }}"
- wait_for_instances: "{{ item.value.wait_for_instances | default(False)}}"
+ termination_policies: "{{ l_node_group_config[openshift_aws_node_group.group].termination_policy if 'termination_policy' in l_node_group_config[openshift_aws_node_group.group] else omit }}"
+ load_balancers: "{{ l_node_group_config[openshift_aws_node_group.group].elbs if 'elbs' in l_node_group_config[openshift_aws_node_group.group] else omit }}"
+ wait_for_instances: "{{ l_node_group_config[openshift_aws_node_group.group].wait_for_instances | default(False)}}"
vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}"
- replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (item.value.replace_all_instances | default(omit)) }}"
+ replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != []
+ else (l_node_group_config[openshift_aws_node_group.group].replace_all_instances | default(omit)) }}"
tags:
- - "{{ openshift_aws_node_group_config_tags | combine(item.value.tags) }}"
- with_dict: "{{ l_nodes_to_build }}"
+ - "{{ openshift_aws_node_group_config_tags
+ | combine(openshift_aws_node_group.tags)
+ | combine({'deployment_serial': l_deployment_serial, 'ami': openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)}) }}"
+
+- name: append the asg name to the openshift_aws_created_asgs fact
+ set_fact:
+ openshift_aws_created_asgs: "{{ [l_node_group_name] | union(openshift_aws_created_asgs) | list }}"
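Scale groups now get a per-deployment serial appended to their name, the tags written to each group record both the serial and the AMI it was launched from, and the new `openshift_aws_created_asgs` fact accumulates the resulting names for later steps. Roughly, for the default compute group (serial and AMI values assumed):

```yaml
# illustration only: the tag set written to a freshly created compute scale group
- debug:
    msg: "{{ {'clusterid': 'default', 'kubernetes.io/cluster/default': 'default'} | combine({'host-type': 'node', 'sub-host-type': 'compute', 'runtime': 'docker'}) | combine({'deployment_serial': 1, 'ami': 'ami-0a1b2c3d'}) }}"
```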
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index 7a3d0fb68..74877d5c7 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -1,6 +1,6 @@
---
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:Name": "{{ openshift_aws_base_ami_name }}"
@@ -12,7 +12,7 @@
- name: bundle ami
ec2_ami:
- instance_id: "{{ instancesout.instances.0.id }}"
+ instance_id: "{{ instancesout.instances.0.instance_id }}"
region: "{{ openshift_aws_region }}"
state: present
description: "This was provisioned {{ ansible_date_time.iso8601 }}"
@@ -46,4 +46,4 @@
ec2:
state: absent
region: "{{ openshift_aws_region }}"
- instance_ids: "{{ instancesout.instances.0.id }}"
+ instance_ids: "{{ instancesout.instances.0.instance_id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
index 43834079e..0568a0190 100644
--- a/roles/openshift_aws/tasks/security_group.yml
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -6,11 +6,27 @@
"tag:Name": "{{ openshift_aws_clusterid }}"
register: vpcout
-- include_tasks: security_group_create.yml
- vars:
- l_security_groups: "{{ openshift_aws_node_security_groups }}"
+- name: create the node group sgs
+ oo_ec2_group:
+ name: "{{ item.value.name}}"
+ description: "{{ item.value.desc }}"
+ rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+
+- name: create the k8s sgs for the node group
+ oo_ec2_group:
+ name: "{{ item.value.name }}_k8s"
+ description: "{{ item.value.desc }} for k8s"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+ register: k8s_sg_create
-- include_tasks: security_group_create.yml
- when: openshift_aws_node_security_groups_extra is defined
- vars:
- l_security_groups: "{{ openshift_aws_node_security_groups_extra | default({}) }}"
+- name: tag sg groups with proper tags
+ ec2_tag:
+ tags: "{{ openshift_aws_security_groups_tags }}"
+ resource: "{{ item.group_id }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/security_group_create.yml b/roles/openshift_aws/tasks/security_group_create.yml
deleted file mode 100644
index ef6060555..000000000
--- a/roles/openshift_aws/tasks/security_group_create.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: create the node group sgs
- ec2_group:
- name: "{{ item.value.name}}"
- description: "{{ item.value.desc }}"
- rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- with_dict: "{{ l_security_groups }}"
-
-- name: create the k8s sgs for the node group
- ec2_group:
- name: "{{ item.value.name }}_k8s"
- description: "{{ item.value.desc }} for k8s"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- with_dict: "{{ l_security_groups }}"
- register: k8s_sg_create
-
-- name: tag sg groups with proper tags
- ec2_tag:
- tags: "{{ openshift_aws_security_groups_tags }}"
- resource: "{{ item.group_id }}"
- region: "{{ openshift_aws_region }}"
- with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml
index 05b68f460..700917ef4 100644
--- a/roles/openshift_aws/tasks/setup_master_group.yml
+++ b/roles/openshift_aws/tasks/setup_master_group.yml
@@ -8,7 +8,7 @@
msg: "openshift_aws_region={{ openshift_aws_region }}"
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
@@ -19,11 +19,13 @@
delay: 3
until: instancesout.instances|length > 0
+- debug: var=instancesout
+
- name: add new master to masters group
add_host:
groups: "{{ openshift_aws_masters_groups }}"
name: "{{ item.public_dns_name }}"
- hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}"
+ hostname: "{{ openshift_aws_clusterid }}-master-{{ item.instance_id[:-5] }}"
with_items: "{{ instancesout.instances }}"
- name: wait for ssh to become available
diff --git a/roles/openshift_aws/tasks/setup_scale_group_facts.yml b/roles/openshift_aws/tasks/setup_scale_group_facts.yml
index d65fdc2de..14c5246c9 100644
--- a/roles/openshift_aws/tasks/setup_scale_group_facts.yml
+++ b/roles/openshift_aws/tasks/setup_scale_group_facts.yml
@@ -1,11 +1,15 @@
---
-- name: group scale group nodes
- ec2_remote_facts:
+- name: fetch all created instances
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
- "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid }}}"
+ "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
+ 'instance-state-name': 'running'} }}"
register: qinstances
+# The building of new and current groups is dependent of having a list of the current asgs and the created ones
+# that can be found in the variables: openshift_aws_created_asgs, openshift_aws_current_asgs. If these do not exist, we cannot determine which hosts are
+# new and which hosts are current.
- name: Build new node group
add_host:
groups: oo_sg_new_nodes
@@ -13,10 +17,16 @@
name: "{{ item.public_dns_name }}"
hostname: "{{ item.public_dns_name }}"
when:
- - (item.tags.version | default(False)) == openshift_aws_new_version
+ - openshift_aws_created_asgs != []
+ - "'aws:autoscaling:groupName' in item.tags"
+ - item.tags['aws:autoscaling:groupName'] in openshift_aws_created_asgs
- "'node' in item.tags['host-type']"
with_items: "{{ qinstances.instances }}"
+- name: dump openshift_aws_current_asgs
+ debug:
+ msg: "{{ openshift_aws_current_asgs }}"
+
- name: Build current node group
add_host:
groups: oo_sg_current_nodes
@@ -24,7 +34,9 @@
name: "{{ item.public_dns_name }}"
hostname: "{{ item.public_dns_name }}"
when:
- - (item.tags.version | default('')) == openshift_aws_current_version
+ - openshift_aws_current_asgs != []
+ - "'aws:autoscaling:groupName' in item.tags"
+ - item.tags['aws:autoscaling:groupName'] in openshift_aws_current_asgs
- "'node' in item.tags['host-type']"
with_items: "{{ qinstances.instances }}"
diff --git a/roles/openshift_aws/tasks/uninstall_security_group.yml b/roles/openshift_aws/tasks/uninstall_security_group.yml
new file mode 100644
index 000000000..55d40e8ec
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_security_group.yml
@@ -0,0 +1,14 @@
+---
+- name: delete the node group sgs
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name}}"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+
+- name: delete the k8s sgs for the node group
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name }}_k8s"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
diff --git a/roles/openshift_aws/tasks/uninstall_ssh_keys.yml b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
new file mode 100644
index 000000000..27e42da53
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
@@ -0,0 +1,9 @@
+---
+- name: Remove the public keys for the user(s)
+ ec2_key:
+ state: absent
+ name: "{{ item.key_name }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ openshift_aws_users }}"
+ no_log: True
+ when: openshift_aws_enable_uninstall_shared_objects | bool
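The new uninstall task files honour `openshift_aws_enable_uninstall_shared_objects`, which defaults to `False` so shared objects such as uploaded ssh keys survive an ordinary uninstall. A sketch of opting in (the play shape is an assumption):

```yaml
- hosts: localhost
  connection: local
  tasks:
  - import_role:
      name: openshift_aws
      tasks_from: uninstall_ssh_keys.yml
    vars:
      # opt in explicitly; the default leaves shared objects in place
      openshift_aws_enable_uninstall_shared_objects: True
```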
diff --git a/roles/openshift_aws/tasks/uninstall_vpc.yml b/roles/openshift_aws/tasks/uninstall_vpc.yml
new file mode 100644
index 000000000..ecf39f694
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_vpc.yml
@@ -0,0 +1,36 @@
+---
+- name: Fetch the VPC for the vpc.id
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_clusterid }}"
+ register: vpcout
+- debug:
+ var: vpcout
+ verbosity: 1
+
+- when: vpcout.vpcs | length > 0
+ block:
+ - name: delete the vpc igw
+ ec2_vpc_igw:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: igw
+
+ - name: delete the vpc subnets
+ ec2_vpc_subnet:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ cidr: "{{ item.cidr }}"
+ az: "{{ item.az }}"
+ with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
+
+ - name: Delete AWS VPC
+ ec2_vpc_net:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ name: "{{ openshift_aws_clusterid }}"
+ cidr_block: "{{ openshift_aws_vpc.cidr }}"
+ register: vpc
diff --git a/roles/openshift_aws/tasks/upgrade_node_group.yml b/roles/openshift_aws/tasks/upgrade_node_group.yml
index c3f86f523..4f4730dd6 100644
--- a/roles/openshift_aws/tasks/upgrade_node_group.yml
+++ b/roles/openshift_aws/tasks/upgrade_node_group.yml
@@ -1,12 +1,22 @@
---
-- fail:
- msg: 'Please ensure the current_version and new_version variables are not the same.'
+- include_tasks: provision_nodes.yml
+ vars:
+ openshift_aws_node_group_upgrade: True
when:
- - openshift_aws_current_version == openshift_aws_new_version
+ - openshift_aws_upgrade_provision_nodes | default(True)
-- include_tasks: provision_nodes.yml
+- debug: var=openshift_aws_current_asgs
+- debug: var=openshift_aws_created_asgs
+
+- name: fail if asg variables aren't set
+ fail:
+ msg: "Please ensure that openshift_aws_created_asgs and openshift_aws_current_asgs are defined."
+ when:
+ - openshift_aws_created_asgs == []
+ - openshift_aws_current_asgs == []
- include_tasks: accept_nodes.yml
+ when: openshift_aws_upgrade_accept_nodes | default(True)
- include_tasks: setup_scale_group_facts.yml
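The upgrade entry point no longer compares `openshift_aws_current_version` against `openshift_aws_new_version`; it re-runs node provisioning with `openshift_aws_node_group_upgrade: True` and then fails fast if neither `openshift_aws_created_asgs` nor `openshift_aws_current_asgs` was populated. A sketch of invoking it (both new toggles default to `True`):

```yaml
- import_role:
    name: openshift_aws
    tasks_from: upgrade_node_group.yml
  vars:
    openshift_aws_upgrade_provision_nodes: True   # build replacement scale groups
    openshift_aws_upgrade_accept_nodes: True      # approve the new nodes' CSRs
```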
diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml
index 9f1a68a2a..3ad876e37 100644
--- a/roles/openshift_aws/tasks/wait_for_groups.yml
+++ b/roles/openshift_aws/tasks/wait_for_groups.yml
@@ -1,31 +1,42 @@
---
# The idea here is to wait until all scale groups are at
# their desired capacity before continuing.
-- name: fetch the scale groups
+# This is accomplished with a custom filter_plugin and until clause
+- name: "fetch the scale groups"
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
tags:
- "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}"
+ "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}"
register: qasg
- until: qasg.results | scale_groups_match_capacity | bool
+ # scale_groups_match_capacity is a custom filter in role lib_utils
+ until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool
delay: 10
retries: 60
+- debug: var=openshift_aws_created_asgs
+
+# how do we guarantee the instances are up?
- name: fetch newly created instances
- ec2_remote_facts:
+ ec2_instance_facts:
region: "{{ openshift_aws_region }}"
filters:
"{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid,
- 'tag:version': openshift_aws_new_version} }}"
+ 'tag:aws:autoscaling:groupName': item,
+ 'instance-state-name': 'running'} }}"
+ with_items: "{{ openshift_aws_created_asgs if openshift_aws_created_asgs != [] else qasg | sum(attribute='results', start=[]) }}"
register: instancesout
until: instancesout.instances|length > 0
delay: 5
retries: 60
+- name: dump instances
+ debug:
+ msg: "{{ instancesout.results | sum(attribute='instances', start=[]) }}"
+
- name: wait for ssh to become available
wait_for:
port: 22
host: "{{ item.public_ip_address }}"
timeout: 300
search_regex: OpenSSH
- with_items: "{{ instancesout.instances }}"
+ with_items: "{{ instancesout.results | sum(attribute='instances', start=[]) }}"
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
index fe0fe83d4..bda1334cd 100644
--- a/roles/openshift_aws/templates/user_data.j2
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -7,8 +7,8 @@ write_files:
owner: 'root:root'
permissions: '0640'
content: |
- openshift_group_type: {{ launch_config_item.key }}
-{% if launch_config_item.key != 'master' %}
+ openshift_group_type: {{ openshift_aws_node_group.group }}
+{% if openshift_aws_node_group.group != 'master' %}
- path: /etc/origin/node/bootstrap.kubeconfig
owner: 'root:root'
permissions: '0640'
@@ -19,7 +19,7 @@ runcmd:
{% if openshift_aws_node_run_bootstrap_startup %}
- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
{% endif %}
-{% if launch_config_item.key != 'master' %}
+{% if openshift_aws_node_group.group != 'master' %}
- [ systemctl, restart, NetworkManager]
- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]