From c3945d39c9264dfa6f393701122d009c7ef2f2a8 Mon Sep 17 00:00:00 2001
From: Andrew Butcher
Date: Mon, 18 Dec 2017 14:13:38 -0500
Subject: Move node group tags to openshift_aws_{master,node}_group.

---
 roles/openshift_aws/defaults/main.yml          | 25 +++++++++++++------------
 roles/openshift_aws/tasks/build_node_group.yml |  2 +-
 roles/openshift_aws/tasks/scale_group.yml      |  2 +-
 3 files changed, 15 insertions(+), 14 deletions(-)
(limited to 'roles/openshift_aws')

diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 74e5d1dde..71de24339 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -122,12 +122,25 @@ openshift_aws_ami_map:
 openshift_aws_master_group:
 - name: "{{ openshift_aws_clusterid }} master group"
   group: master
+  tags:
+    host-type: master
+    sub-host-type: default
+    runtime: docker
 
 openshift_aws_node_groups:
 - name: "{{ openshift_aws_clusterid }} compute group"
   group: compute
+  tags:
+    host-type: node
+    sub-host-type: compute
+    runtime: docker
+
 - name: "{{ openshift_aws_clusterid }} infra group"
   group: infra
+  tags:
+    host-type: node
+    sub-host-type: infra
+    runtime: docker
 
 openshift_aws_created_asgs: []
 openshift_aws_current_asgs: []
@@ -144,10 +157,6 @@ openshift_aws_master_group_config:
     min_size: 3
     max_size: 3
     desired_size: 3
-    tags:
-      host-type: master
-      sub-host-type: default
-      runtime: docker
     wait_for_instances: True
     termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
     replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
@@ -167,10 +176,6 @@ openshift_aws_node_group_config:
     min_size: 3
     max_size: 100
     desired_size: 3
-    tags:
-      host-type: node
-      sub-host-type: compute
-      runtime: docker
     termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
     replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
     iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -186,10 +191,6 @@ openshift_aws_node_group_config:
     min_size: 2
     max_size: 20
     desired_size: 2
-    tags:
-      host-type: node
-      sub-host-type: infra
-      runtime: docker
     termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
     replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
     iam_role: "{{ openshift_aws_iam_role_name }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 7fb617dd5..9485cc3ac 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -30,7 +30,7 @@
 - name: query all asg's for this cluster
   ec2_asg_facts:
     region: "{{ openshift_aws_region }}"
-    tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(l_node_group_config[openshift_aws_node_group.group].tags) }}"
+    tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(openshift_aws_node_group.tags) }}"
   register: asgs
 
 - fail:
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index 3632f7ce9..6ce8c58ba 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -22,7 +22,7 @@
                             else (l_node_group_config[openshift_aws_node_group.group].replace_all_instances | default(omit)) }}"
     tags:
     - "{{ openshift_aws_node_group_config_tags
-          | combine(l_node_group_config[openshift_aws_node_group.group].tags)
+          | combine(openshift_aws_node_group.tags)
           | combine({'deployment_serial': l_deployment_serial,
                      'ami': openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)}) }}"
 
 - name: append the asg name to the openshift_aws_created_asgs fact
--
cgit v1.2.3
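With the tags now carried on each entry of openshift_aws_master_group and
openshift_aws_node_groups, per-group tags can be overridden from inventory
without copying the whole *_group_config dict. A minimal sketch, assuming the
defaults introduced above; the extra environment tag is illustrative and not
part of the role:

```yaml
# Hypothetical inventory override following the new layout: tags live on
# the group entry itself, not under openshift_aws_node_group_config.
openshift_aws_node_groups:
- name: "{{ openshift_aws_clusterid }} compute group"
  group: compute
  tags:
    host-type: node
    sub-host-type: compute
    runtime: docker
    environment: production  # illustrative extra tag
```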
From 1395f1a87e2dea2eab991d2c46aa7ef4173279a4 Mon Sep 17 00:00:00 2001
From: Kenny Woodson
Date: Thu, 21 Dec 2017 12:54:56 -0500
Subject: Adding ability to update ami drive size.

---
 roles/openshift_aws/tasks/provision_instance.yml | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)
(limited to 'roles/openshift_aws')

diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 696b323c0..7eadd1522 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -14,11 +14,7 @@
     instance_type: m4.xlarge
     vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}"
     image: "{{ openshift_aws_base_ami }}"
-    volumes:
-    - device_name: /dev/sdb
-      volume_type: gp2
-      volume_size: 100
-      delete_on_termination: true
+    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
     wait: yes
     exact_count: 1
     count_tag:
--
cgit v1.2.3
From eacc12897ca86a255f89b8a4537ce2b7004cf319 Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Fri, 5 Jan 2018 12:44:56 -0500
Subject: Migrate to import_role for static role inclusion

In Ansible 2.2, the include_role directive came into existence as a
Tech Preview. It is still a Tech Preview through Ansible 2.4 (and in
current devel branch), but with a notable change. The default behavior
switched from static: true to static: false because that functionality
moved to the newly introduced import_role directive (in order to stay
consistent with include* being dynamic in nature and import* being
static in nature).

The dynamic include is considerably more memory intensive as it will
dynamically create a role import for every host in the inventory list
to be used. (Also worth noting, at the time of this writing there is an
object allocation inefficiency in the dynamic include that can in
certain situations amplify this effect considerably.)

This change is meant to mitigate the pressure on memory for the Ansible
control host. We need to evaluate where it makes sense to dynamically
include roles and revert back to dynamic inclusion if and where it
makes sense to do so.
---
 roles/openshift_aws/README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
(limited to 'roles/openshift_aws')

diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
index 4aca5c7a8..de73ab01d 100644
--- a/roles/openshift_aws/README.md
+++ b/roles/openshift_aws/README.md
@@ -7,9 +7,9 @@ This role contains many task-areas to provision resources and perform actions
 against an AWS account for the purposes of dynamically building an openshift
 cluster.
 
-This role is primarily intended to be used with "include_role" and "tasks_from".
+This role is primarily intended to be used with "import_role" and "tasks_from".
 
-include_role can be called from the tasks section in a play. See example
+import_role can be called from the tasks section in a play. See example
 playbook below for reference.
 
 These task-areas are:
@@ -40,7 +40,7 @@ Example Playbook
 ----------------
 
 ```yaml
-- include_role:
+- import_role:
     name: openshift_aws
     tasks_from: vpc.yml
   vars:
--
cgit v1.2.3
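The trade-off described in the commit message comes down to when the role is
resolved. A minimal sketch of the two forms, assuming Ansible 2.4; the
contrast paraphrases the commit message above:

```yaml
# Static: resolved once at parse time, so the control host pays the
# role-import cost a single time.
- import_role:
    name: openshift_aws
    tasks_from: vpc.yml

# Dynamic: resolved at runtime, creating a role import for every host
# in the inventory -- the memory pressure this patch avoids.
- include_role:
    name: openshift_aws
    tasks_from: vpc.yml
```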
From 08f324a8c82abb86cb041668f54bd1a758a9060d Mon Sep 17 00:00:00 2001
From: Chris Callegari
Date: Thu, 4 Jan 2018 16:32:47 -0500
Subject: Update to AWS EC2 root vol size so that Health Check tasks pass

---
 roles/openshift_aws/defaults/main.yml | 8 ++++++++
 1 file changed, 8 insertions(+)
(limited to 'roles/openshift_aws')

diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 71de24339..91d0b76f5 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -98,12 +98,20 @@ openshift_aws_elb_dict:
       proxy_protocol: True
 
 openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sda1
+  volume_size: 100
+  device_type: gp2
+  delete_on_termination: False
 - device_name: /dev/sdb
   volume_size: 100
   device_type: gp2
   delete_on_termination: False
 
 openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sda1
+  volume_size: 100
+  device_type: gp2
+  delete_on_termination: True
 - device_name: /dev/sdb
   volume_size: 100
   device_type: gp2
   delete_on_termination: True
--
cgit v1.2.3
From 3623bb6d344164f3f071f25ea4fcd99d7f1a7f3a Mon Sep 17 00:00:00 2001
From: Joel Diaz
Date: Mon, 8 Jan 2018 16:04:50 -0500
Subject: docker storage setup for ami building

add host to g_new_node_hosts so that plays run against the AMI instance

update example vars so that overlay2 is used by default for docker
storage
---
 roles/openshift_aws/tasks/provision_instance.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'roles/openshift_aws')

diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 7eadd1522..786db1570 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -42,5 +42,5 @@
 
 - name: add host to nodes
   add_host:
-    groups: nodes
+    groups: nodes,g_new_node_hosts
     name: "{{ instancesout.instances[0].public_dns_name }}"
--
cgit v1.2.3
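Since provision_instance.yml now reads its volume list from
openshift_aws_node_group_config_node_volumes (see the earlier patch in this
series), the root-volume sizing added above can be tuned from inventory rather
than by editing the role. A minimal sketch, assuming the defaults above; the
150 GiB root size is illustrative:

```yaml
# Hypothetical inventory override: same keys as the defaults added in
# this patch, with a larger root volume for nodes and AMI builds.
openshift_aws_node_group_config_node_volumes:
- device_name: /dev/sda1
  volume_size: 150  # illustrative; the patched default is 100
  device_type: gp2
  delete_on_termination: True
- device_name: /dev/sdb
  volume_size: 100
  device_type: gp2
  delete_on_termination: True
```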
From 518a90253c8308231f032cc22b7da6241d5334b5 Mon Sep 17 00:00:00 2001
From: Andrew Butcher
Date: Tue, 19 Dec 2017 15:52:50 -0500
Subject: Setup master groups in order to use the master group's
 ansible_ssh_user to pull bootstrap kubeconfig.

---
 roles/openshift_aws/tasks/provision_nodes.yml | 17 ++---------------
 1 file changed, 2 insertions(+), 15 deletions(-)
(limited to 'roles/openshift_aws')

diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index d82f18574..9105b5b4c 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -2,25 +2,12 @@
 # Get bootstrap config token
 # bootstrap should be created on first master
 # need to fetch it and shove it into cloud data
-- name: fetch master instances
-  ec2_instance_facts:
-    region: "{{ openshift_aws_region }}"
-    filters:
-      "tag:clusterid": "{{ openshift_aws_clusterid }}"
-      "tag:host-type": master
-      instance-state-name: running
-  register: instancesout
-  retries: 20
-  delay: 3
-  until:
-  - "'instances' in instancesout"
-  - instancesout.instances|length > 0
+- include_tasks: setup_master_group.yml
 
 - name: slurp down the bootstrap.kubeconfig
   slurp:
     src: /etc/origin/master/bootstrap.kubeconfig
-  delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
-  remote_user: root
+  delegate_to: "{{ groups.masters.0 }}"
   register: bootstrap
 
 - name: set_fact for kubeconfig token
--
cgit v1.2.3
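The patch above replaces the inline master lookup with an include of
setup_master_group.yml, whose contents are not shown here. Whatever it does
must leave a populated masters group behind, since the slurp task now
delegates to groups.masters.0. A sketch of what such a task file might look
like, reusing the deleted ec2_instance_facts query; this is an assumption, not
the actual file:

```yaml
---
# Sketch only: query running masters and register them as an in-memory
# "masters" group, so later tasks delegate using that group's
# ansible_ssh_user instead of a hard-coded root login.
- name: fetch master instances
  ec2_instance_facts:
    region: "{{ openshift_aws_region }}"
    filters:
      "tag:clusterid": "{{ openshift_aws_clusterid }}"
      "tag:host-type": master
      instance-state-name: running
  register: instancesout

- name: add master instances to the masters group
  add_host:
    groups: masters
    name: "{{ item.public_dns_name }}"
  with_items: "{{ instancesout.instances }}"
```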
From d3fefc32a727fe3c13159c4e9fe4399f35b487a8 Mon Sep 17 00:00:00 2001
From: Michael Gugino
Date: Thu, 4 Jan 2018 23:55:34 -0500
Subject: Move more plugins to lib_utils

This commit continues moving plugins into lib_utils.

This commit does not move any plugins for add-on roles such as
logging and metrics.
---
 roles/openshift_aws/defaults/main.yml          |  2 +
 .../filter_plugins/openshift_aws_filters.py    | 74 ----------------------
 roles/openshift_aws/tasks/build_node_group.yml |  1 +
 roles/openshift_aws/tasks/wait_for_groups.yml  |  1 +
 4 files changed, 4 insertions(+), 74 deletions(-)
 delete mode 100644 roles/openshift_aws/filter_plugins/openshift_aws_filters.py
(limited to 'roles/openshift_aws')

diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 71de24339..8c8227b5e 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -109,6 +109,7 @@ openshift_aws_node_group_config_node_volumes:
   device_type: gp2
   delete_on_termination: True
 
+# build_instance_tags is a custom filter in role lib_utils
 openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
 openshift_aws_node_group_termination_policy: Default
 openshift_aws_node_group_replace_instances: []
@@ -201,6 +202,7 @@ openshift_aws_node_group_config:
 openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}"
 openshift_aws_elb_az_load_balancing: False
 
+# build_instance_tags is a custom filter in role lib_utils
 openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
 
 openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
deleted file mode 100644
index dfcb11da3..000000000
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use in openshift_aws
-'''
-
-from ansible import errors
-
-
-class FilterModule(object):
-    ''' Custom ansible filters for use by openshift_aws role'''
-
-    @staticmethod
-    def scale_groups_serial(scale_group_info, upgrade=False):
-        ''' This function will determine what the deployment serial should be and return it
-
-            Search through the tags and find the deployment_serial tag. Once found,
-            determine if an increment is needed during an upgrade.
-            if upgrade is true then increment the serial and return it
-            else return the serial
-        '''
-        if scale_group_info == []:
-            return 1
-
-        scale_group_info = scale_group_info[0]
-
-        if not isinstance(scale_group_info, dict):
-            raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict")
-
-        serial = None
-
-        for tag in scale_group_info['tags']:
-            if tag['key'] == 'deployment_serial':
-                serial = int(tag['value'])
-                if upgrade:
-                    serial += 1
-                break
-        else:
-            raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found")
-
-        return serial
-
-    @staticmethod
-    def scale_groups_match_capacity(scale_group_info):
-        ''' This function will verify that the scale group instance count matches
-            the scale group desired capacity
-
-        '''
-        for scale_group in scale_group_info:
-            if scale_group['desired_capacity'] != len(scale_group['instances']):
-                return False
-
-        return True
-
-    @staticmethod
-    def build_instance_tags(clusterid):
-        ''' This function will return a dictionary of the instance tags.
-
-            The main desire to have this inside of a filter_plugin is that we
-            need to build the following key.
-
-            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"}
-
-        '''
-        tags = {'clusterid': clusterid,
-                'kubernetes.io/cluster/{}'.format(clusterid): clusterid}
-
-        return tags
-
-    def filters(self):
-        ''' returns a mapping of filters to methods '''
-        return {'build_instance_tags': self.build_instance_tags,
-                'scale_groups_match_capacity': self.scale_groups_match_capacity,
-                'scale_groups_serial': self.scale_groups_serial}
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 9485cc3ac..a9f9cc3c4 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -43,6 +43,7 @@
 
 - name: set the value for the deployment_serial and the current asgs
   set_fact:
+    # scale_groups_serial is a custom filter in role lib_utils
     l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined
                              else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
     openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}"
diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml
index 1f4ef3e1c..3ad876e37 100644
--- a/roles/openshift_aws/tasks/wait_for_groups.yml
+++ b/roles/openshift_aws/tasks/wait_for_groups.yml
@@ -8,6 +8,7 @@
     tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}"
   register: qasg
 
+  # scale_groups_match_capacity is a custom filter in role lib_utils
   until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool
   delay: 10
   retries: 60
--
cgit v1.2.3
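The deleted filters are expected to keep the same call sites, as the added
comments note; they are just loaded from the lib_utils role instead. From the
body of build_instance_tags above, its result for a given cluster id is
deterministic, so a quick check is easy to sketch:

```yaml
# Sketch: what build_instance_tags returns for a literal cluster id.
- debug:
    msg: "{{ 'mycluster' | build_instance_tags }}"
# Expected output, per the filter body above:
#   {'clusterid': 'mycluster', 'kubernetes.io/cluster/mycluster': 'mycluster'}
```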
From 3986e89ecf2df2f5f1ff5b28273d9a6407f8d6f6 Mon Sep 17 00:00:00 2001
From: Andrew Butcher
Date: Wed, 10 Jan 2018 15:07:58 -0500
Subject: Move s3 & elb provisioning into their own playbooks so that they
 are applied outside of the openshift_aws master provisioning tasks.

---
 roles/openshift_aws/tasks/provision.yml     | 17 -----------------
 roles/openshift_aws/tasks/provision_elb.yml | 15 +++++++++++++++
 2 files changed, 15 insertions(+), 17 deletions(-)
 create mode 100644 roles/openshift_aws/tasks/provision_elb.yml
(limited to 'roles/openshift_aws')

diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 786a2e4cf..2b5f317d8 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,23 +1,6 @@
 ---
-- when: openshift_aws_create_iam_cert | bool
-  name: create the iam_cert for elb certificate
-  include_tasks: iam_cert.yml
-
-- when: openshift_aws_create_s3 | bool
-  name: create s3 bucket for registry
-  include_tasks: s3.yml
-
 - include_tasks: vpc_and_subnet_id.yml
 
-- name: create elbs
-  include_tasks: elb.yml
-  with_dict: "{{ openshift_aws_elb_dict }}"
-  vars:
-    l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
-    l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
-  loop_control:
-    loop_var: l_elb_dict_item
-
 - name: include scale group creation for master
   include_tasks: build_node_group.yml
   with_items: "{{ openshift_aws_master_group }}"
diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml
new file mode 100644
index 000000000..a52f63bd5
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_elb.yml
@@ -0,0 +1,15 @@
+---
+- when: openshift_aws_create_iam_cert | bool
+  name: create the iam_cert for elb certificate
+  include_tasks: iam_cert.yml
+
+- include_tasks: vpc_and_subnet_id.yml
+
+- name: create elbs
+  include_tasks: elb.yml
+  with_dict: "{{ openshift_aws_elb_dict }}"
+  vars:
+    l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
+    l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
+  loop_control:
+    loop_var: l_elb_dict_item
--
cgit v1.2.3
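With the ELB work factored out, provision_elb.yml can be applied on its own
using the same tasks_from pattern the README documents; the s3.yml tasks
removed from provision.yml can presumably be driven the same way. A minimal
sketch; the surrounding play and variables are illustrative:

```yaml
# Sketch: provision ELBs separately from the master scale-group work.
- import_role:
    name: openshift_aws
    tasks_from: provision_elb.yml
```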