Diffstat (limited to 'roles/openshift_aws')
-rw-r--r--  roles/openshift_aws/defaults/main.yml                        | 107
-rw-r--r--  roles/openshift_aws/files/describeinstances.json             |  15
-rw-r--r--  roles/openshift_aws/files/trustpolicy.json                   |  12
-rw-r--r--  roles/openshift_aws/filter_plugins/openshift_aws_filters.py  |   6
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml               |  13
-rw-r--r--  roles/openshift_aws/tasks/elb.yml                            |  60
-rw-r--r--  roles/openshift_aws/tasks/iam_role.yml                       |  36
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml                  |  32
-rw-r--r--  roles/openshift_aws/tasks/launch_config_create.yml           |  26
-rw-r--r--  roles/openshift_aws/tasks/master_facts.yml                   |  10
-rw-r--r--  roles/openshift_aws/tasks/provision.yml                      |  31
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml             |  15
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml                |  20
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml                    |  32
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml                       |   7
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml                 |  43
-rw-r--r--  roles/openshift_aws/tasks/security_group_create.yml          |  25
-rw-r--r--  roles/openshift_aws/tasks/vpc_and_subnet_id.yml              |  18
-rw-r--r--  roles/openshift_aws/templates/user_data.j2                   |   8
19 files changed, 298 insertions(+), 218 deletions(-)
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 5371588cf..c9a429675 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -1,10 +1,10 @@
---
openshift_aws_create_s3: True
openshift_aws_create_iam_cert: True
+openshift_aws_create_iam_role: False
openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
-openshift_aws_node_group_type: master
openshift_aws_wait_for_ssh: True
@@ -12,12 +12,15 @@ openshift_aws_clusterid: default
openshift_aws_region: us-east-1
openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
-openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
openshift_aws_iam_cert_key_path: ''
-openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}"
+openshift_aws_scale_group_basename: "{{ openshift_aws_clusterid }} openshift"
+
+openshift_aws_iam_role_name: openshift_node_describe_instances
+openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}"
+openshift_aws_iam_role_policy_name: "describe_instances"
openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
openshift_aws_ami: ''
@@ -28,7 +31,7 @@ openshift_aws_ami_name: openshift-gi
openshift_aws_base_ami_name: ami_base
openshift_aws_launch_config_bootstrap_token: ''
-openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}"
+openshift_aws_launch_config_basename: "{{ openshift_aws_clusterid }}"
openshift_aws_users: []
@@ -48,12 +51,19 @@ openshift_aws_elb_health_check:
unhealthy_threshold: 2
healthy_threshold: 2
-openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}"
+openshift_aws_elb_name_dict:
+ master:
+ external: "{{ openshift_aws_elb_basename }}-master-external"
+ internal: "{{ openshift_aws_elb_basename }}-master-internal"
+ infra:
+ external: "{{ openshift_aws_elb_basename }}-infra"
+
openshift_aws_elb_idle_timout: 400
openshift_aws_elb_scheme: internet-facing
openshift_aws_elb_cert_arn: ''
-openshift_aws_elb_listeners:
+openshift_aws_elb_dict:
master:
external:
- protocol: tcp
@@ -75,6 +85,18 @@ openshift_aws_elb_listeners:
load_balancer_port: 443
instance_protocol: tcp
instance_port: 443
+ infra:
+ external:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 443
+ proxy_protocol: True
+ - protocol: tcp
+ load_balancer_port: 443
+ instance_protocol: tcp
+ instance_port: 443
+ proxy_protocol: True
openshift_aws_node_group_config_master_volumes:
- device_name: /dev/sdb
@@ -88,17 +110,21 @@ openshift_aws_node_group_config_node_volumes:
device_type: gp2
delete_on_termination: True
-openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_node_group_termination_policy: Default
openshift_aws_node_group_replace_instances: []
openshift_aws_node_group_replace_all_instances: False
openshift_aws_node_group_config_extra_labels: {}
-openshift_aws_node_group_config:
- tags: "{{ openshift_aws_node_group_config_tags }}"
+openshift_aws_ami_map:
+ master: "{{ openshift_aws_ami }}"
+ infra: "{{ openshift_aws_ami }}"
+ compute: "{{ openshift_aws_ami }}"
+
+openshift_aws_master_group_config:
+ # The 'master' key is always required here.
master:
instance_type: m4.xlarge
- ami: "{{ openshift_aws_ami }}"
volumes: "{{ openshift_aws_node_group_config_master_volumes }}"
health_check:
period: 60
@@ -114,9 +140,15 @@ openshift_aws_node_group_config:
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+ iam_role: "{{ openshift_aws_iam_role_name }}"
+ policy_name: "{{ openshift_aws_iam_role_policy_name }}"
+ policy_json: "{{ openshift_aws_iam_role_policy_json }}"
+ elbs: "{{ openshift_aws_elb_name_dict['master'].keys()| map('extract', openshift_aws_elb_name_dict['master']) | list }}"
+
+openshift_aws_node_group_config:
+ # The 'compute' key is always required here.
compute:
instance_type: m4.xlarge
- ami: "{{ openshift_aws_ami }}"
volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
health_check:
period: 60
@@ -131,9 +163,12 @@ openshift_aws_node_group_config:
type: compute
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+ iam_role: "{{ openshift_aws_iam_role_name }}"
+ policy_name: "{{ openshift_aws_iam_role_policy_name }}"
+ policy_json: "{{ openshift_aws_iam_role_policy_json }}"
+ # The 'infra' key is always required here.
infra:
instance_type: m4.xlarge
- ami: "{{ openshift_aws_ami }}"
volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
health_check:
period: 60
@@ -148,20 +183,33 @@ openshift_aws_node_group_config:
type: infra
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+ iam_role: "{{ openshift_aws_iam_role_name }}"
+ policy_name: "{{ openshift_aws_iam_role_policy_name }}"
+ policy_json: "{{ openshift_aws_iam_role_policy_json }}"
+ elbs: "{{ openshift_aws_elb_name_dict['infra'].keys()| map('extract', openshift_aws_elb_name_dict['infra']) | list }}"
-openshift_aws_elb_security_groups:
-- "{{ openshift_aws_clusterid }}"
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}"
+openshift_aws_elb_az_load_balancing: False
-openshift_aws_elb_instance_filter:
- "tag:clusterid": "{{ openshift_aws_clusterid }}"
- "tag:host-type": "{{ openshift_aws_node_group_type }}"
- instance-state-name: running
+openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
+
+openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
openshift_aws_launch_config_security_groups:
-- "{{ openshift_aws_clusterid }}" # default sg
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s
+ compute:
+ - "{{ openshift_aws_clusterid }}" # default sg
+ - "{{ openshift_aws_clusterid }}_compute" # node type sg
+ - "{{ openshift_aws_clusterid }}_compute_k8s" # node type sg k8s
+ infra:
+ - "{{ openshift_aws_clusterid }}" # default sg
+ - "{{ openshift_aws_clusterid }}_infra" # node type sg
+ - "{{ openshift_aws_clusterid }}_infra_k8s" # node type sg k8s
+ master:
+ - "{{ openshift_aws_clusterid }}" # default sg
+ - "{{ openshift_aws_clusterid }}_master" # node type sg
+ - "{{ openshift_aws_clusterid }}_master_k8s" # node type sg k8s
+
+openshift_aws_security_groups_tags: "{{ openshift_aws_kube_tags }}"
openshift_aws_node_security_groups:
default:
@@ -231,3 +279,18 @@ openshift_aws_vpc:
openshift_aws_node_run_bootstrap_startup: True
openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
+
+# If creating extra node groups, you'll need to define all of the following.
+
+# The format is the same as openshift_aws_node_group_config, but the top-level
+# key names should be different (i.e., not 'master' or 'infra').
+# openshift_aws_node_group_config_extra: {}
+
+# This variable should look like openshift_aws_launch_config_security_groups
+# and contain a one-to-one mapping of top level keys that are defined in
+# openshift_aws_node_group_config_extra.
+# openshift_aws_launch_config_security_groups_extra: {}
+
+# openshift_aws_node_security_groups_extra: {}
+
+# openshift_aws_ami_map_extra: {}
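As an illustration of the commented-out variables above, a minimal sketch of how an inventory might wire an extra node group through all four variables follows. The 'gpu' group key, instance type, and security group description are hypothetical and not part of this change; the field layout mirrors openshift_aws_node_group_config and openshift_aws_launch_config_security_groups as defined above.

openshift_aws_node_group_config_extra:
  gpu:                                          # hypothetical extra group key
    instance_type: p2.xlarge                    # hypothetical instance type
    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
    health_check:
      period: 60
      type: EC2
    min_size: 1
    max_size: 1
    desired_size: 1
    tags:
      host-type: node                           # hypothetical tag values
      sub-host-type: gpu
    termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
    replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"

# one-to-one mapping with the key above
openshift_aws_launch_config_security_groups_extra:
  gpu:
  - "{{ openshift_aws_clusterid }}"             # default sg
  - "{{ openshift_aws_clusterid }}_gpu"         # node type sg
  - "{{ openshift_aws_clusterid }}_gpu_k8s"     # node type sg k8s

openshift_aws_node_security_groups_extra:
  gpu:
    name: "{{ openshift_aws_clusterid }}_gpu"
    desc: "security group for gpu nodes"        # hypothetical description

openshift_aws_ami_map_extra:
  gpu: "{{ openshift_aws_ami }}"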
diff --git a/roles/openshift_aws/files/describeinstances.json b/roles/openshift_aws/files/describeinstances.json
new file mode 100644
index 000000000..40de49721
--- /dev/null
+++ b/roles/openshift_aws/files/describeinstances.json
@@ -0,0 +1,15 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Action": [
+ "ec2:DescribeInstances"
+ ],
+ "Resource": [
+ "*"
+ ],
+ "Effect": "Allow",
+ "Sid": "Stmt1438195894000"
+ }
+ ]
+}
diff --git a/roles/openshift_aws/files/trustpolicy.json b/roles/openshift_aws/files/trustpolicy.json
new file mode 100644
index 000000000..87c7d7c42
--- /dev/null
+++ b/roles/openshift_aws/files/trustpolicy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
index 06e1f9602..a9893c0a7 100644
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
@@ -9,17 +9,17 @@ class FilterModule(object):
''' Custom ansible filters for use by openshift_aws role'''
@staticmethod
- def build_instance_tags(clusterid, status='owned'):
+ def build_instance_tags(clusterid):
''' This function will return a dictionary of the instance tags.
The main desire to have this inside of a filter_plugin is that we
need to build the following key.
- {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
+ {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"}
'''
tags = {'clusterid': clusterid,
- 'kubernetes.io/cluster/{}'.format(clusterid): status}
+ 'kubernetes.io/cluster/{}'.format(clusterid): clusterid}
return tags
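For reference, the filter now drops the status argument and tags the cluster with its own clusterid instead of the fixed string 'owned'. Assuming a hypothetical openshift_aws_clusterid of 'mycluster', openshift_aws_kube_tags and openshift_aws_node_group_config_tags would render roughly as:

# output of "{{ 'mycluster' | build_instance_tags }}" (hypothetical clusterid)
clusterid: mycluster
kubernetes.io/cluster/mycluster: mycluster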
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 0dac1c23d..7e8e9b679 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -1,4 +1,6 @@
---
+# This task file expects l_nodes_to_build to be passed in.
+
# When openshift_aws_use_custom_ami is '' then
# we retrieve the latest build AMI.
# Then set openshift_aws_ami to the ami.
@@ -21,14 +23,15 @@
- "'results' in amiout"
- amiout.results|length > 0
-- when: openshift_aws_create_security_groups
- name: "Create {{ openshift_aws_node_group_type }} security groups"
- include: security_group.yml
+# Need to set epoch time in one place to use for launch_config and scale_group
+- set_fact:
+ l_epoch_time: "{{ ansible_date_time.epoch }}"
+
+- when: openshift_aws_create_iam_role
+ include: iam_role.yml
- when: openshift_aws_create_launch_config
- name: "Create {{ openshift_aws_node_group_type }} launch config"
include: launch_config.yml
- when: openshift_aws_create_scale_group
- name: "Create {{ openshift_aws_node_group_type }} node group"
include: scale_group.yml
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index 7bc3184df..a543222d5 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -1,66 +1,24 @@
---
-- name: query vpc
- ec2_vpc_net_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- 'tag:Name': "{{ openshift_aws_vpc_name }}"
- register: vpcout
-
-- name: debug
- debug: var=vpcout
-
-- name: fetch the remote instances
- ec2_remote_facts:
- region: "{{ openshift_aws_region }}"
- filters: "{{ openshift_aws_elb_instance_filter }}"
- register: instancesout
-
-- name: fetch the default subnet id
- ec2_vpc_subnet_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- "tag:Name": "{{ openshift_aws_subnet_name }}"
- vpc-id: "{{ vpcout.vpcs[0].id }}"
- register: subnetout
-
-- name:
+- name: "dump the elb listeners for {{ l_elb_dict_item.key }}"
debug:
- msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
- if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
- else openshift_aws_elb_listeners }}"
+ msg: "{{ l_elb_dict_item.value }}"
-- name: "Create ELB {{ l_openshift_aws_elb_name }}"
+- name: "Create ELB {{ l_elb_dict_item.key }}"
ec2_elb_lb:
- name: "{{ l_openshift_aws_elb_name }}"
+ name: "{{ l_openshift_aws_elb_name_dict[l_elb_dict_item.key][item.key] }}"
state: present
- security_group_names: "{{ openshift_aws_elb_security_groups }}"
+ cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}"
+ security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}"
idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
region: "{{ openshift_aws_region }}"
subnets:
- "{{ subnetout.subnets[0].id }}"
health_check: "{{ openshift_aws_elb_health_check }}"
- listeners: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
- if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
- else openshift_aws_elb_listeners }}"
+ listeners: "{{ item.value }}"
scheme: "{{ openshift_aws_elb_scheme }}"
- tags:
- KubernetesCluster: "{{ openshift_aws_clusterid }}"
+ tags: "{{ openshift_aws_elb_tags }}"
register: new_elb
-
-# It is necessary to ignore_errors here because the instances are not in 'ready'
-# state when first added to ELB
-- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}"
- ec2_elb:
- instance_id: "{{ item.id }}"
- ec2_elbs: "{{ l_openshift_aws_elb_name }}"
- state: present
- region: "{{ openshift_aws_region }}"
- wait: False
- with_items: "{{ instancesout.instances }}"
- ignore_errors: True
- retries: 10
- register: elb_call
- until: elb_call|succeeded
+ with_dict: "{{ l_elb_dict_item.value }}"
- debug:
msg: "{{ item }}"
diff --git a/roles/openshift_aws/tasks/iam_role.yml b/roles/openshift_aws/tasks/iam_role.yml
new file mode 100644
index 000000000..d9910d938
--- /dev/null
+++ b/roles/openshift_aws/tasks/iam_role.yml
@@ -0,0 +1,36 @@
+---
+#####
+# Instance profiles consist of two parts. The first part is creating a role
+# that the instance assumes and whose permissions it uses to make API calls
+# on its behalf. The role requires a trust policy that links a service (ec2)
+# to the role and states that the role is allowed to make ec2 API calls.
+# See ../files/trustpolicy.json
+#
+# Currently openshift-node requires access to the AWS API
+# to call DescribeInstances.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1510519
+#####
+- name: Create an iam role
+ iam_role:
+ name: "{{ item.value.iam_role }}"
+ assume_role_policy_document: "{{ lookup('file','trustpolicy.json') }}"
+ state: "{{ openshift_aws_iam_role_state | default('present') }}"
+ when: item.value.iam_role is defined
+ with_dict: "{{ l_nodes_to_build }}"
+
+#####
+# The second part of this task file is linking the role to a policy
+# that specifies which calls the role can make to the ec2 API.
+# Currently all that is required is DescribeInstances.
+# See ../files/describeinstances.json
+#####
+- name: create an iam policy
+ iam_policy:
+ iam_type: role
+ iam_name: "{{ item.value.iam_role }}"
+ policy_json: "{{ item.value.policy_json }}"
+ policy_name: "{{ item.value.policy_name }}"
+ state: "{{ openshift_aws_iam_role_state | default('present') }}"
+ when: item.value.iam_role is defined
+ with_dict: "{{ l_nodes_to_build }}"
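Both tasks above are gated on item.value.iam_role being defined, and the file itself is only included when openshift_aws_create_iam_role is true (it defaults to False in this change), so the instance profile is opt-in. A minimal inventory toggle, using only variables introduced in this commit, would be:

openshift_aws_create_iam_role: True
# optional overrides; these are the defaults added in defaults/main.yml
# openshift_aws_iam_role_name: openshift_node_describe_instances
# openshift_aws_iam_role_policy_name: describe_instances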
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index 8b7b02a0e..0dbeba5a0 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -9,31 +9,7 @@
when:
- openshift_deployment_type is undefined
-- name: query vpc
- ec2_vpc_net_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- 'tag:Name': "{{ openshift_aws_vpc_name }}"
- register: vpcout
-
-- name: fetch the security groups for launch config
- ec2_group_facts:
- filters:
- group-name: "{{ openshift_aws_launch_config_security_groups }}"
- vpc-id: "{{ vpcout.vpcs[0].id }}"
- region: "{{ openshift_aws_region }}"
- register: ec2sgs
-
-# Create the scale group config
-- name: Create the node scale group launch config
- ec2_lc:
- name: "{{ openshift_aws_launch_config_name }}"
- region: "{{ openshift_aws_region }}"
- image_id: "{{ openshift_aws_ami }}"
- instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
- security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
- user_data: "{{ lookup('template', 'user_data.j2') }}"
- key_name: "{{ openshift_aws_ssh_key_name }}"
- ebs_optimized: False
- volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}"
- assign_public_ip: True
+- include: launch_config_create.yml
+ with_dict: "{{ l_nodes_to_build }}"
+ loop_control:
+ loop_var: launch_config_item
diff --git a/roles/openshift_aws/tasks/launch_config_create.yml b/roles/openshift_aws/tasks/launch_config_create.yml
new file mode 100644
index 000000000..a688496d2
--- /dev/null
+++ b/roles/openshift_aws/tasks/launch_config_create.yml
@@ -0,0 +1,26 @@
+---
+- name: fetch the security groups for launch config
+ ec2_group_facts:
+ filters:
+ group-name: "{{ l_launch_config_security_groups[launch_config_item.key] }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ region: "{{ openshift_aws_region }}"
+ register: ec2sgs
+
+# Create the scale group config
+- name: Create the node scale group launch config
+ ec2_lc:
+ name: "{{ openshift_aws_launch_config_basename }}-{{ launch_config_item.key }}-{{ l_epoch_time }}"
+ region: "{{ openshift_aws_region }}"
+ image_id: "{{ l_aws_ami_map[launch_config_item.key] | default(openshift_aws_ami) }}"
+ instance_type: "{{ launch_config_item.value.instance_type }}"
+ security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
+ instance_profile_name: "{{ launch_config_item.value.iam_role if launch_config_item.value.iam_role is defined and
+ launch_config_item.value.iam_role != '' and
+ openshift_aws_create_iam_role
+ else omit }}"
+ user_data: "{{ lookup('template', 'user_data.j2') }}"
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ ebs_optimized: False
+ volumes: "{{ launch_config_item.value.volumes }}"
+ assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml
index 737cfc7a6..530b0134d 100644
--- a/roles/openshift_aws/tasks/master_facts.yml
+++ b/roles/openshift_aws/tasks/master_facts.yml
@@ -3,20 +3,18 @@
ec2_elb_facts:
region: "{{ openshift_aws_region }}"
names:
- - "{{ item }}"
- with_items:
- - "{{ openshift_aws_elb_name }}-external"
- - "{{ openshift_aws_elb_name }}-internal"
+ - "{{ openshift_aws_elb_name_dict['master']['internal'] }}"
delegate_to: localhost
register: elbs
- debug: var=elbs
+ run_once: true
- name: set fact
set_fact:
- openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+ openshift_master_cluster_hostname: "{{ elbs.elbs[0].dns_name }}"
osm_custom_cors_origins:
- - "{{ elbs.results[1].elbs[0].dns_name }}"
+ - "{{ elbs.elbs[0].dns_name }}"
- "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
- "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
with_items: "{{ groups['masters'] }}"
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index a8518d43a..91538ed5c 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -7,35 +7,36 @@
name: create s3 bucket for registry
include: s3.yml
+- include: vpc_and_subnet_id.yml
+
+- name: create elbs
+ include: elb.yml
+ with_dict: "{{ openshift_aws_elb_dict }}"
+ vars:
+ l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
+ l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
+ loop_control:
+ loop_var: l_elb_dict_item
+
- name: include scale group creation for master
include: build_node_group.yml
+ vars:
+ l_nodes_to_build: "{{ openshift_aws_master_group_config }}"
+ l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
+ l_aws_ami_map: "{{ openshift_aws_ami_map }}"
- name: fetch newly created instances
ec2_remote_facts:
region: "{{ openshift_aws_region }}"
filters:
"tag:clusterid": "{{ openshift_aws_clusterid }}"
- "tag:host-type": "{{ openshift_aws_node_group_type }}"
+ "tag:host-type": "master"
instance-state-name: running
register: instancesout
retries: 20
delay: 3
until: instancesout.instances|length > 0
-- name: create our master internal load balancers
- include: elb.yml
- vars:
- openshift_aws_elb_direction: internal
- l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal"
- openshift_aws_elb_scheme: internal
-
-- name: create our master external load balancers
- include: elb.yml
- vars:
- openshift_aws_elb_direction: external
- l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external"
- openshift_aws_elb_scheme: internet-facing
-
- name: wait for ssh to become available
wait_for:
port: 22
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 25ae6ce1c..3349acb7a 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -3,20 +3,7 @@
set_fact:
openshift_node_bootstrap: True
-- name: query vpc
- ec2_vpc_net_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- 'tag:Name': "{{ openshift_aws_vpc_name }}"
- register: vpcout
-
-- name: fetch the default subnet id
- ec2_vpc_subnet_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- "tag:Name": "{{ openshift_aws_subnet_name }}"
- vpc-id: "{{ vpcout.vpcs[0].id }}"
- register: subnetout
+- include: vpc_and_subnet_id.yml
- name: create instance for ami creation
ec2:
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index fc4996c68..1b40f24d3 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -25,19 +25,23 @@
set_fact:
openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}"
-- name: include build node group for infra
+- include: vpc_and_subnet_id.yml
+
+- name: include build compute and infra node groups
include: build_node_group.yml
vars:
- openshift_aws_node_group_type: infra
- openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift infra"
- openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-infra-{{ ansible_date_time.epoch }}"
+ l_nodes_to_build: "{{ openshift_aws_node_group_config }}"
+ l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
+ l_aws_ami_map: "{{ openshift_aws_ami_map }}"
-- name: include build node group for compute
+- name: include build node group for extra nodes
include: build_node_group.yml
+ when: openshift_aws_node_group_config_extra is defined
vars:
- openshift_aws_node_group_type: compute
- openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift compute"
- openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-compute-{{ ansible_date_time.epoch }}"
+ l_nodes_to_build: "{{ openshift_aws_node_group_config_extra | default({}) }}"
+ l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups_extra }}"
+ l_aws_ami_map: "{{ openshift_aws_ami_map_extra }}"
+
- when: openshift_aws_wait_for_ssh | bool
block:
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index eb31636e7..097859af2 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -1,11 +1,4 @@
---
-- name: query vpc
- ec2_vpc_net_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- 'tag:Name': "{{ openshift_aws_vpc_name }}"
- register: vpcout
-
- name: fetch the subnet to use in scale group
ec2_vpc_subnet_facts:
region: "{{ openshift_aws_region }}"
@@ -16,19 +9,20 @@
- name: Create the scale group
ec2_asg:
- name: "{{ openshift_aws_scale_group_name }}"
- launch_config_name: "{{ openshift_aws_launch_config_name }}"
- health_check_period: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.period }}"
- health_check_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.type }}"
- min_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].min_size }}"
- max_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].max_size }}"
- desired_capacity: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].desired_size }}"
+ name: "{{ openshift_aws_scale_group_basename }} {{ item.key }}"
+ launch_config_name: "{{ openshift_aws_launch_config_basename }}-{{ item.key }}-{{ l_epoch_time }}"
+ health_check_period: "{{ item.value.health_check.period }}"
+ health_check_type: "{{ item.value.health_check.type }}"
+ min_size: "{{ item.value.min_size }}"
+ max_size: "{{ item.value.max_size }}"
+ desired_capacity: "{{ item.value.desired_size }}"
region: "{{ openshift_aws_region }}"
- termination_policies: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].termination_policy if 'termination_policy' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
- load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
- wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}"
+ termination_policies: "{{ item.value.termination_policy if 'termination_policy' in item.value else omit }}"
+ load_balancers: "{{ item.value.elbs if 'elbs' in item.value else omit }}"
+ wait_for_instances: "{{ item.value.wait_for_instances | default(False)}}"
vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}"
- replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (openshift_aws_node_group_config[openshift_aws_node_group_type].replace_all_instances | default(omit)) }}"
+ replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (item.value.replace_all_instances | default(omit)) }}"
tags:
- - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}"
+ - "{{ openshift_aws_node_group_config_tags | combine(item.value.tags) }}"
+ with_dict: "{{ l_nodes_to_build }}"
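Because the launch config and the scale group now both derive their names from the loop key plus values set once in build_node_group.yml (the shared l_epoch_time fact), the two stay paired for a given run. With a hypothetical clusterid of 'mycluster' and an l_epoch_time of 1511000000, the 'compute' entry would render roughly as:

# hypothetical rendered names for the 'compute' key
launch_config_name: mycluster-compute-1511000000   # openshift_aws_launch_config_basename + key + l_epoch_time
scale_group_name: mycluster openshift compute      # openshift_aws_scale_group_basename + key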
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index d319fdd1a..0cb749dcc 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -1,11 +1,4 @@
---
-- name: Remove any ansible facts created during AMI creation
- file:
- path: "/etc/ansible/facts.d/{{ item }}"
- state: absent
- with_items:
- - openshift.fact
-
- name: fetch newly created instances
ec2_remote_facts:
region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
index 161e72fb4..5cc7ae537 100644
--- a/roles/openshift_aws/tasks/security_group.yml
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -6,40 +6,11 @@
"tag:Name": "{{ openshift_aws_clusterid }}"
register: vpcout
-- name: Create default security group for cluster
- ec2_group:
- name: "{{ openshift_aws_node_security_groups.default.name }}"
- description: "{{ openshift_aws_node_security_groups.default.desc }}"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- rules: "{{ openshift_aws_node_security_groups.default.rules | default(omit, True)}}"
- register: sg_default_created
-
-- name: create the node group sgs
- ec2_group:
- name: "{{ item.name}}"
- description: "{{ item.desc }}"
- rules: "{{ item.rules if 'rules' in item else [] }}"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- register: sg_create
- with_items:
- - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
+- include: security_group_create.yml
+ vars:
+ l_security_groups: "{{ openshift_aws_node_security_groups }}"
-- name: create the k8s sgs for the node group
- ec2_group:
- name: "{{ item.name }}_k8s"
- description: "{{ item.desc }} for k8s"
- region: "{{ openshift_aws_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- register: k8s_sg_create
- with_items:
- - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
-
-- name: tag sg groups with proper tags
- ec2_tag:
- tags:
- KubernetesCluster: "{{ openshift_aws_clusterid }}"
- resource: "{{ item.group_id }}"
- region: "{{ openshift_aws_region }}"
- with_items: "{{ k8s_sg_create.results }}"
+- include: security_group_create.yml
+ when: openshift_aws_node_security_groups_extra is defined
+ vars:
+ l_security_groups: "{{ openshift_aws_node_security_groups_extra | default({}) }}"
diff --git a/roles/openshift_aws/tasks/security_group_create.yml b/roles/openshift_aws/tasks/security_group_create.yml
new file mode 100644
index 000000000..ef6060555
--- /dev/null
+++ b/roles/openshift_aws/tasks/security_group_create.yml
@@ -0,0 +1,25 @@
+---
+- name: create the node group sgs
+ ec2_group:
+ name: "{{ item.value.name}}"
+ description: "{{ item.value.desc }}"
+ rules: "{{ item.value.rules if 'rules' in item.value else [] }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ l_security_groups }}"
+
+- name: create the k8s sgs for the node group
+ ec2_group:
+ name: "{{ item.value.name }}_k8s"
+ description: "{{ item.value.desc }} for k8s"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ with_dict: "{{ l_security_groups }}"
+ register: k8s_sg_create
+
+- name: tag sg groups with proper tags
+ ec2_tag:
+ tags: "{{ openshift_aws_security_groups_tags }}"
+ resource: "{{ item.group_id }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/vpc_and_subnet_id.yml b/roles/openshift_aws/tasks/vpc_and_subnet_id.yml
new file mode 100644
index 000000000..aaf9b300f
--- /dev/null
+++ b/roles/openshift_aws/tasks/vpc_and_subnet_id.yml
@@ -0,0 +1,18 @@
+---
+- name: query vpc
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ 'tag:Name': "{{ openshift_aws_vpc_name }}"
+ register: vpcout
+
+- name: debug
+ debug: var=vpcout
+
+- name: fetch the default subnet id
+ ec2_vpc_subnet_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_subnet_name }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ register: subnetout
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
index ed9c0ed0b..a8c7f9a95 100644
--- a/roles/openshift_aws/templates/user_data.j2
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -7,9 +7,9 @@ write_files:
owner: 'root:root'
permissions: '0640'
content: |
- openshift_group_type: {{ openshift_aws_node_group_type }}
-{% if openshift_aws_node_group_type != 'master' %}
-- path: /etc/origin/node/csr_kubeconfig
+ openshift_group_type: {{ launch_config_item.key }}
+{% if launch_config_item.key != 'master' %}
+- path: /etc/origin/node/bootstrap.kubeconfig
owner: 'root:root'
permissions: '0640'
encoding: b64
@@ -19,7 +19,7 @@ runcmd:
{% if openshift_aws_node_run_bootstrap_startup %}
- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
{% endif %}
-{% if openshift_aws_node_group_type != 'master' %}
+{% if launch_config_item.key != 'master' %}
- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
{% endif %}
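For a sense of what the template change produces, here is a rough sketch of only the fragments of rendered user-data touched by this diff, for a hypothetical launch_config_item whose key is 'compute', on an origin (non-enterprise) deployment with openshift_aws_node_run_bootstrap_startup left at its default of True; unchanged parts of the template are elided:

# rendered fragments for a hypothetical 'compute' group (origin deployment)
    content: |
      openshift_group_type: compute
- path: /etc/origin/node/bootstrap.kubeconfig
  owner: 'root:root'
  permissions: '0640'
  encoding: b64
runcmd:
- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
- [ systemctl, enable, origin-node]
- [ systemctl, start, origin-node]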