From 636510c3eec7317acdfded00d6237ed5f6ff3529 Mon Sep 17 00:00:00 2001 From: Andrew Block Date: Mon, 8 Feb 2016 00:10:01 -0600 Subject: New OSE3 docker host builder and OpenStack ansible provisioning support --- roles/common/pre_tasks/pre_tasks.yml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 roles/common/pre_tasks/pre_tasks.yml (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml new file mode 100644 index 000000000..c573bff8c --- /dev/null +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -0,0 +1,4 @@ +--- +- name: Generate Environment ID + shell: echo "$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 8)" + register: env_random_id \ No newline at end of file -- cgit v1.2.3 From 80c3d3332507fe620fcab99e65f2ffd81d48a69e Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Tue, 29 Mar 2016 16:52:05 -0500 Subject: Add subscription-manager support for Hosted or Satellite --- roles/subscription-manager/README.md | 95 ++++++++++++++++++++++ roles/subscription-manager/pre_tasks/pre_tasks.yml | 37 +++++++++ roles/subscription-manager/tasks/main.yml | 93 +++++++++++++++++++++ 3 files changed, 225 insertions(+) create mode 100644 roles/subscription-manager/README.md create mode 100644 roles/subscription-manager/pre_tasks/pre_tasks.yml create mode 100644 roles/subscription-manager/tasks/main.yml (limited to 'roles') diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md new file mode 100644 index 000000000..b140ad09a --- /dev/null +++ b/roles/subscription-manager/README.md @@ -0,0 +1,95 @@ +# Red Hat Subscription Manager Ansible Role + +## Parameters + +This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. The variables are: + +### rhsm_method + +Subscription Manager method to use for registration. Valid values are: + +* **satellite** - Use a Satellite server. Additional variables required include **rhsm_server**, **rhsm_org** and either (**rhsm_username** and **rhsm_password**) or **rhsm_activationkey** +* **hosted** - Use Red Hat's CDN. Additional variables required are **rhsm_server** (defaults to RHSM CDN) and **rhsm_username** and **rhsm_password** +* none/false/blank will disable any subscription manager activities (this is the default if no parameters are set) + +Default: none + +### rhsm_server + +Subscription Manager server hostname. If using a Satellite server set the FQDN here. If using RHSM Hosted this value is ignored. + +Default: none + +### rhsm_username + +Subscription Manager username. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. + +Default: none + +### rhsm_password + +Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. + +Default: none + +### rhsm_org + +Optional Satellite Subscription Manager Organization. Required for Satellite, ignored if using RHSM Hosted. + +Default: none + +### rhsm_activationkey + +Optional Satellite Subscription Manager Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. + +Default: none + +### rhsm_pool + +Optional Subscription Manager pool, determine this by running **subscription-manager list --available** on a registered system. Valid for RHSM Hosted or Satellite. 
Specifying **rhsm_activationkey** will ignore this option. + +Default: none + +### rhsm_repos + +Optional Repositories to enable, this can also be specified in the **rhsm_activationkey**. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** will ignore this option. + +NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such: + +rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server-extras-rpms"]' + +Default: none + +## Pre-tasks + +A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: + +``` + pre_tasks: + - include: roles/subscription-manager/pre_tasks/pre_tasks.yml +``` + +## Tasks + +The bulk of the work is performed in the main.yml for this role. The pre-task play will set a variable which can be checked to contitionally include this role as such: + +``` + roles: + - { role: subscription-manager, when: not hostvars.localhost.rhsm_skip, tags: 'subscription-manager' } +``` + +## Running the Playbook + +To register to RHSM Hosted with username and password: + +``` +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_method='hosted' rhsm_username=vvaldez rhsm_password='hunter2' openstack_key_name='vvaldez'" +``` + +To register to a Satellite server with an activation key: + +``` +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_server='10.12.32.1' rhsm_org='cloud_practice' rhsm_activationkey='rhel-7-ose-3-1' openstack_key_name='vvaldez' rhsm_method='satellite'" +``` + +To ignore any Subscription Manager activities, simple do not set any parameters or explicitly set **rhsm_method** to false. diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml new file mode 100644 index 000000000..497f39353 --- /dev/null +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -0,0 +1,37 @@ +--- +- name: Initialize Subscription Manager fact + set_fact: + rhsm_skip: false + +- name: Determine if Subscription Manager should be skipped or not + set_fact: + rhsm_skip: true + when: rhsm_method is undefined or rhsm_method is none or rhsm_method|trim == '' + +- name: Determine Subscription Manager method + fail: msg="Value for 'rhsm_method' of '{{ rhsm_method }}' is not valid, it should be one of 'hosted', 'satellite', or false/none/blank" + when: + - rhsm_method != 'hosted' and rhsm_method != 'satellite' + - not rhsm_skip + +- name: Validate Subscription Manager host is set + fail: msg="Cannot determine Subscription Manager server hostname without a value for 'rhsm_server'" + when: + - rhsm_server is undefined or rhsm_server is none or rhsm_server|trim == '' + - not rhsm_method == 'hosted' + - not rhsm_skip + +- name: Validate Subscription Manager organization is set + fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" + when: + - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' + - rhsm_method == 'satellite' + - not rhsm_skip + +- name: Validate Subscription Manager authentication is defined + fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set" + when: + - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') + - rhsm_activationkey is undefined or 
rhsm_activationkey is none or rhsm_activationkey|trim == '' + - not rhsm_skip + diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml new file mode 100644 index 000000000..2e04a7a22 --- /dev/null +++ b/roles/subscription-manager/tasks/main.yml @@ -0,0 +1,93 @@ +--- +- name: Initializing Subscription Manager authenticaiton method + set_fact: + rhsm_authentication: false + +# 'rhsm_activationkey' will take precedence even if 'rhsm_username' and 'rhsm_password' are also set +- name: Setting Subscription Manager Activation Key Fact + set_fact: + rhsm_authentication: "key" + when: + - rhsm_activationkey is defined + - rhsm_activationkey is not none + - rhsm_activationkey|trim != '' + - not rhsm_authentication + +# If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password +- name: Setting Subscription Manager Username and Password Fact + set_fact: + rhsm_authentication: "password" + when: + - rhsm_username is defined and rhsm_username is not none and rhsm_username|trim != '' + - rhsm_password is defined and rhsm_password is not none and rhsm_password|trim != '' + - not rhsm_authentication + +- name: Initializing registration status + set_fact: + registered: false + +- name: Checking subscription status (a failure means it is not registered and will be) + command: "/usr/bin/subscription-manager status" + ignore_errors: yes + changed_when: no + register: check_if_registered + +- name: Set registration fact + set_fact: + registered: true + when: check_if_registered.rc == 0 + +- name: Cleaning any old subscriptions + command: "/usr/bin/subscription-manager clean" + when: + - not registered + - rhsm_authentication is defined + +- name: Install Satellite certificate + command: "rpm -Uvh --force http://{{ rhsm_server }}/pub/katello-ca-consumer-latest.noarch.rpm" + when: + - not registered + - rhsm_method == 'satellite' + +- name: Register to Satellite using activation key + command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}" + when: + - not registered + - rhsm_authentication == 'key' + - rhsm_method == 'satellite' + +# This can apply to either Hosted or Satellite +- name: Register using username and password + command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" + when: + - not registered + - rhsm_authentication != "key" + +- name: Auto-attach to Subscription Manager Pool + command: "/usr/bin/subscription-manager attach --auto" + when: + - not registered + - rhsm_authentication != "key" + +- name: Attach to a specific pool + command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" + when: + - rhsm_pool is defined and rhsm_pool is not none and rhsm_pool|trim != '' + - and not registered + - rhsm_authentication != "key" + +- name: Disable all repositories + command: "/usr/bin/subscription-manager repos --disable=*" + when: + - not registered + - not rhsm_authentication == "key" + +- name: Enable specified repositories + command: "/usr/bin/subscription-manager repos --enable={{ item }}" + with_items: rhsm_repos + when: + - not registered + - not rhsm_authentication == "key" + +- name: Cleaning yum repositories + command: "yum clean all" -- cgit v1.2.3 From 177950b76a185c20317aa0e89d356cdf8b97c4c3 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Wed, 30 Mar 2016 15:46:31 -0500 Subject: Refactor role to dynamically determine rhsm_method * Removes rhsm_method * Renames 
rhsm_server to rhsm_satellite * Add additional pre_task checks (hosted + key) * Change conditionals from rhsm_method check to rhsm_satellite defined * Change repos disable/enable from key to if repos are defined * Update README and examples in inventory file --- roles/subscription-manager/README.md | 30 ++++++---------- roles/subscription-manager/pre_tasks/pre_tasks.yml | 41 ++++++++++++---------- roles/subscription-manager/tasks/main.yml | 20 +++++++---- 3 files changed, 46 insertions(+), 45 deletions(-) (limited to 'roles') diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md index b140ad09a..e604c7475 100644 --- a/roles/subscription-manager/README.md +++ b/roles/subscription-manager/README.md @@ -2,21 +2,11 @@ ## Parameters -This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. The variables are: +This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. No values are set by default which disables this role. The variables are: -### rhsm_method +### rhsm_satellite -Subscription Manager method to use for registration. Valid values are: - -* **satellite** - Use a Satellite server. Additional variables required include **rhsm_server**, **rhsm_org** and either (**rhsm_username** and **rhsm_password**) or **rhsm_activationkey** -* **hosted** - Use Red Hat's CDN. Additional variables required are **rhsm_server** (defaults to RHSM CDN) and **rhsm_username** and **rhsm_password** -* none/false/blank will disable any subscription manager activities (this is the default if no parameters are set) - -Default: none - -### rhsm_server - -Subscription Manager server hostname. If using a Satellite server set the FQDN here. If using RHSM Hosted this value is ignored. +Subscription Manager server hostname. If using a Satellite server set the FQDN here. If using RHSM Hosted this value must be left blank, none or false. Default: none @@ -34,13 +24,13 @@ Default: none ### rhsm_org -Optional Satellite Subscription Manager Organization. Required for Satellite, ignored if using RHSM Hosted. +Optional Subscription Manager Satellite Organization. Required for Satellite, ignored if using RHSM Hosted. Default: none ### rhsm_activationkey -Optional Satellite Subscription Manager Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. +Optional Subscription Manager Satellite Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. Default: none @@ -52,7 +42,7 @@ Default: none ### rhsm_repos -Optional Repositories to enable, this can also be specified in the **rhsm_activationkey**. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** will ignore this option. +Optional list of repositories to enable. If left blank it is expected that the **rhsm_activationkey** will specify repos instead. If populated, a **subscription-manager repos --disable=\*** will be run and each of the specified repos explicitly enabled. Valid for RHSM Hosted or Satellite NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such: @@ -75,7 +65,7 @@ The bulk of the work is performed in the main.yml for this role. 
The pre-task pl ``` roles: - - { role: subscription-manager, when: not hostvars.localhost.rhsm_skip, tags: 'subscription-manager' } + - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } ``` ## Running the Playbook @@ -83,13 +73,13 @@ The bulk of the work is performed in the main.yml for this role. The pre-task pl To register to RHSM Hosted with username and password: ``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_method='hosted' rhsm_username=vvaldez rhsm_password='hunter2' openstack_key_name='vvaldez'" +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez rhsm_password=hunter2" ``` To register to a Satellite server with an activation key: ``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_server='10.12.32.1' rhsm_org='cloud_practice' rhsm_activationkey='rhel-7-ose-3-1' openstack_key_name='vvaldez' rhsm_method='satellite'" +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1" ``` -To ignore any Subscription Manager activities, simple do not set any parameters or explicitly set **rhsm_method** to false. +To ignore any Subscription Manager activities, simply do not set any parameters. diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index 497f39353..dcd56b2b9 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,37 +1,40 @@ --- - name: Initialize Subscription Manager fact set_fact: - rhsm_skip: false + rhsm_register: true -- name: Determine if Subscription Manager should be skipped or not +- name: Determine if Subscription Manager should be used set_fact: - rhsm_skip: true - when: rhsm_method is undefined or rhsm_method is none or rhsm_method|trim == '' - -- name: Determine Subscription Manager method - fail: msg="Value for 'rhsm_method' of '{{ rhsm_method }}' is not valid, it should be one of 'hosted', 'satellite', or false/none/blank" - when: - - rhsm_method != 'hosted' and rhsm_method != 'satellite' - - not rhsm_skip - -- name: Validate Subscription Manager host is set - fail: msg="Cannot determine Subscription Manager server hostname without a value for 'rhsm_server'" + rhsm_register: false when: - - rhsm_server is undefined or rhsm_server is none or rhsm_server|trim == '' - - not rhsm_method == 'hosted' - - not rhsm_skip + - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' + - rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '' + - rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '' + - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' + - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' + - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - name: Validate Subscription Manager organization is set fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" when: - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' - - rhsm_method == 'satellite' - - not rhsm_skip + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' + - rhsm_register - name: Validate Subscription Manager authentication is defined fail: msg="Cannot register without ('rhsm_username' and 
'rhsm_password') or 'rhsm_activationkey' variables set" when: - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - - not rhsm_skip + - rhsm_register +- name: Validate activation key and Hosted are not requested together + fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'" + when: + - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' + - rhsm_activationkey is defined + - rhsm_activationkey is not none + - rhsm_activationkey|trim != '' + - rhsm_register diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 2e04a7a22..78ceaccd1 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -32,7 +32,7 @@ changed_when: no register: check_if_registered -- name: Set registration fact +- name: Set registration fact if system is already registered set_fact: registered: true when: check_if_registered.rc == 0 @@ -44,17 +44,21 @@ - rhsm_authentication is defined - name: Install Satellite certificate - command: "rpm -Uvh --force http://{{ rhsm_server }}/pub/katello-ca-consumer-latest.noarch.rpm" + command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" when: - not registered - - rhsm_method == 'satellite' + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' - name: Register to Satellite using activation key command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}" when: - not registered - rhsm_authentication == 'key' - - rhsm_method == 'satellite' + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite - name: Register using username and password @@ -80,14 +84,18 @@ command: "/usr/bin/subscription-manager repos --disable=*" when: - not registered - - not rhsm_authentication == "key" + - rhsm_repos is defined + - rhsm_repos is not none + - rhsm_repos|trim != '' - name: Enable specified repositories command: "/usr/bin/subscription-manager repos --enable={{ item }}" with_items: rhsm_repos when: - not registered - - not rhsm_authentication == "key" + - rhsm_repos is defined + - rhsm_repos is not none + - rhsm_repos|trim != '' - name: Cleaning yum repositories command: "yum clean all" -- cgit v1.2.3 From 644f1e672c80bd10f34fabafcfe805c306e77b5e Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Tue, 5 Apr 2016 12:23:35 -0500 Subject: Fix bad syntax with extra 'and' in when using rhsm_pool --- roles/subscription-manager/tasks/main.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 78ceaccd1..414bf8f7a 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -1,4 +1,5 @@ --- + - name: Initializing Subscription Manager authenticaiton method set_fact: rhsm_authentication: false @@ -61,6 +62,7 @@ - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite + - name: Register using username and password command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" when: @@ -72,12 +74,15 @@ when: - 
not registered - rhsm_authentication != "key" + - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - name: Attach to a specific pool command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" when: - - rhsm_pool is defined and rhsm_pool is not none and rhsm_pool|trim != '' - - and not registered + - rhsm_pool is defined + - rhsm_pool is not none + - rhsm_pool|trim != '' + - not registered - rhsm_authentication != "key" - name: Disable all repositories -- cgit v1.2.3 From 96aaa6df25774e05cda3e4a6f73b030ae989100a Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Tue, 5 Apr 2016 18:17:36 -0500 Subject: Refactor use of rhsm_password to prevent display to CLI --- roles/subscription-manager/README.md | 30 ++++++++++++++++++---- roles/subscription-manager/pre_tasks/pre_tasks.yml | 9 +++++++ roles/subscription-manager/tasks/main.yml | 23 ++++++++++++----- 3 files changed, 50 insertions(+), 12 deletions(-) (limited to 'roles') diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md index e604c7475..a5dd1ac44 100644 --- a/roles/subscription-manager/README.md +++ b/roles/subscription-manager/README.md @@ -18,7 +18,9 @@ Default: none ### rhsm_password -Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. +Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. + +NOTE: This variable is prompted for at the start of the playbook run. This is for security purposes so the password is not left in the command history. If specified on the command-line or set in a variable file it will be ignored and the value captured from the prompt will overwrite it instead. Default: none @@ -50,7 +52,25 @@ rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server- Default: none -## Pre-tasks +## Calling This Role +Calling this role requires adding a **vars_prompt**, **pre_tasks**, and **roles** section of a play + +### vars_prompt +Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable + +Add a prompt to capture **rhsm_password** + +``` +- hosts: localhost + vars_prompt: + # Unfortunately vars_prompt can only be used at the play level before role tasks, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable + - name: "rhsm_password" + prompt: "Subscription Manager password (enter blank if using rhsm_activationkey or to disable registration)" + confirm: yes + private: yes +``` + +### pre-tasks A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: @@ -59,7 +79,7 @@ A number of variable checks are performed before any tasks to ensure the proper - include: roles/subscription-manager/pre_tasks/pre_tasks.yml ``` -## Tasks +### roles The bulk of the work is performed in the main.yml for this role. The pre-task play will set a variable which can be checked to contitionally include this role as such: @@ -73,7 +93,7 @@ The bulk of the work is performed in the main.yml for this role. 
The pre-task pl To register to RHSM Hosted with username and password: ``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez rhsm_password=hunter2" +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez" ``` To register to a Satellite server with an activation key: @@ -82,4 +102,4 @@ To register to a Satellite server with an activation key: ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1" ``` -To ignore any Subscription Manager activities, simply do not set any parameters. +To ignore any Subscription Manager activities, simply do not set any parameters. When prompted for the password, hit **Enter** to set a blank password. diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index dcd56b2b9..31441785e 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,4 +1,13 @@ --- +- name: Set password fact + set_fact: + rhsm_password: "{{ rhsm_password }}" + no_log: true + when: + - rhsm_password is defined + - rhsm_password is not none + - rhsm_password|trim != '' + - name: Initialize Subscription Manager fact set_fact: rhsm_register: true diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 414bf8f7a..6e51be7e4 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -1,5 +1,12 @@ --- - +- name: Initialize rhsm_password variable if vars_prompt was used + set_fact: + rhsm_password: "{{ hostvars.localhost.rhsm_password }}" + when: + - rhsm_password is defined + - rhsm_password is not none + - rhsm_password|trim != '' + - name: Initializing Subscription Manager authenticaiton method set_fact: rhsm_authentication: false @@ -19,8 +26,12 @@ set_fact: rhsm_authentication: "password" when: - - rhsm_username is defined and rhsm_username is not none and rhsm_username|trim != '' - - rhsm_password is defined and rhsm_password is not none and rhsm_password|trim != '' + - rhsm_username is defined + - rhsm_username is not none + - rhsm_username|trim != '' + - rhsm_password is defined + - rhsm_password is not none + - rhsm_password|trim != '' - not rhsm_authentication - name: Initializing registration status @@ -62,18 +73,17 @@ - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite - - name: Register using username and password command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" + no_log: true when: - not registered - - rhsm_authentication != "key" + - rhsm_authentication == "password" - name: Auto-attach to Subscription Manager Pool command: "/usr/bin/subscription-manager attach --auto" when: - not registered - - rhsm_authentication != "key" - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - name: Attach to a specific pool @@ -83,7 +93,6 @@ - rhsm_pool is not none - rhsm_pool|trim != '' - not registered - - rhsm_authentication != "key" - name: Disable all repositories command: "/usr/bin/subscription-manager repos --disable=*" -- cgit v1.2.3 From 71f4817263a21b6e2062b35928ebfab373d26278 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Fri, 8 Apr 2016 11:02:57 -0500 Subject: Cosmetic changes to task names and move yum clean all to prereqs --- roles/subscription-manager/tasks/main.yml | 33 
++++++++++++++----------------- 1 file changed, 15 insertions(+), 18 deletions(-) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 6e51be7e4..adf3a8e85 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Initialize rhsm_password variable if vars_prompt was used +- name: "Initialize rhsm_password variable if vars_prompt was used" set_fact: rhsm_password: "{{ hostvars.localhost.rhsm_password }}" when: @@ -7,12 +7,12 @@ - rhsm_password is not none - rhsm_password|trim != '' -- name: Initializing Subscription Manager authenticaiton method +- name: "Initializing Subscription Manager authenticaiton method" set_fact: rhsm_authentication: false # 'rhsm_activationkey' will take precedence even if 'rhsm_username' and 'rhsm_password' are also set -- name: Setting Subscription Manager Activation Key Fact +- name: "Setting Subscription Manager Activation Key Fact" set_fact: rhsm_authentication: "key" when: @@ -22,7 +22,7 @@ - not rhsm_authentication # If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password -- name: Setting Subscription Manager Username and Password Fact +- name: "Setting Subscription Manager Username and Password Fact" set_fact: rhsm_authentication: "password" when: @@ -34,28 +34,28 @@ - rhsm_password|trim != '' - not rhsm_authentication -- name: Initializing registration status +- name: "Initializing registration status" set_fact: registered: false -- name: Checking subscription status (a failure means it is not registered and will be) +- name: "Checking subscription status (a failure means it is not registered and will be)" command: "/usr/bin/subscription-manager status" ignore_errors: yes changed_when: no register: check_if_registered -- name: Set registration fact if system is already registered +- name: "Set registration fact if system is already registered" set_fact: registered: true when: check_if_registered.rc == 0 -- name: Cleaning any old subscriptions +- name: "Cleaning any old subscriptions" command: "/usr/bin/subscription-manager clean" when: - not registered - rhsm_authentication is defined -- name: Install Satellite certificate +- name: "Install Satellite certificate" command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" when: - not registered @@ -63,7 +63,7 @@ - rhsm_satellite is not none - rhsm_satellite|trim != '' -- name: Register to Satellite using activation key +- name: "Register to Satellite using activation key" command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}" when: - not registered @@ -73,20 +73,20 @@ - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite -- name: Register using username and password +- name: "Register using username and password" command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" no_log: true when: - not registered - rhsm_authentication == "password" -- name: Auto-attach to Subscription Manager Pool +- name: "Auto-attach to Subscription Manager Pool" command: "/usr/bin/subscription-manager attach --auto" when: - not registered - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' -- name: Attach to a specific pool +- name: "Attach to a specific pool" command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" when: - 
rhsm_pool is defined
@@ -94,7 +94,7 @@
   - rhsm_pool|trim != ''
   - not registered
 
-- name: Disable all repositories
+- name: "Disable all repositories"
   command: "/usr/bin/subscription-manager repos --disable=*"
   when:
   - not registered
@@ -102,7 +102,7 @@
   - rhsm_repos is not none
   - rhsm_repos|trim != ''
 
-- name: Enable specified repositories
+- name: "Enable specified repositories"
   command: "/usr/bin/subscription-manager repos --enable={{ item }}"
   with_items: rhsm_repos
   when:
@@ -110,6 +110,3 @@
   - rhsm_repos is defined
   - rhsm_repos is not none
   - rhsm_repos|trim != ''
-
-- name: Cleaning yum repositories
-  command: "yum clean all"
-- cgit v1.2.3


From 39f973fcfd40fde18f5e92259d05e4ba6b30e22e Mon Sep 17 00:00:00 2001
From: Vinny Valdez
Date: Fri, 8 Apr 2016 18:44:23 -0500
Subject: Remove vars_prompt, add info to README to re-enable and for ansible-vault

---
 roles/subscription-manager/README.md               | 91 +++++++++++++++++-----
 roles/subscription-manager/pre_tasks/pre_tasks.yml | 14 ++--
 roles/subscription-manager/tasks/main.yml          |  4 +-
 3 files changed, 79 insertions(+), 30 deletions(-)
(limited to 'roles')

diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md
index a5dd1ac44..748de282c 100644
--- a/roles/subscription-manager/README.md
+++ b/roles/subscription-manager/README.md
@@ -20,7 +20,48 @@ Default: none
 
 Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this.
 
-NOTE: This variable is prompted for at the start of the playbook run. This is for security purposes so the password is not left in the command history. If specified on the command-line or set in a variable file it will be ignored and the value captured from the prompt will overwrite it instead.
+NOTE: If this variable is specified on the command-line or set in a variable file it may leave your password exposed. For this reason you may prefer to use an Activation Key if using Satellite. For RHSM Hosted, your password must be specified. There are two ways to provide the password to the Ansible playbook without exposing it to prying eyes.
+
+1. The first method is to use a **vars_prompt** to collect the password up front one time for the playbook. Ansible will not display the password if the prompt is configured as **private** and the task will not display the password on the CLI. This is a good method as it supports automating the task to every host with only one password entry. To enable **vars_prompt** add the following to the very top of your playbook after the **hosts** declaration and before any **pre_tasks** section:
+
+  ```
+  - hosts: localhost
+  # Add the following lines after a -hosts: declaration and before pre_tasks:
+  # Start of vars_prompt code block
+    vars_prompt:
+      - name: "rhsm_password"
+        prompt: "Subscription Manager password"
+        confirm: yes
+        private: yes
+  # End of vars_prompt code block
+    pre_tasks:
+  ```
+
+2. A second method is to use an encrypted file via **ansible-vault**. This does not require modifying any code as the previous method does, but does require more work to create and encrypt the file. To accomplish this, first create a file containing at least the **rhsm_password** variable (it is also possible to specify additional variables to encrypt them all as well):
+  1. Create a file to contain the variable such as **secrets.yml**:
+
+  ```
+  ---
+  rhsm_password: "my_secret_password"
+  # other variables can optionally be placed here as well
+  ```
+
+  2. 
Encrypt the file with **ansible-vault**: + + ``` + $ ansible-vault encrypt secrets.yml + Vault password: + Confirm Vault password: + Encryption successful + ``` + + 3. When executing **ansible-playbook** specify **--ask-vault-pass** to be prompted for the decryption password, and also specify the location of the **secrets.yml** as such: + + ``` + $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" + ``` + + NOTE: Optionally the file containing the encrypted variables can be decrypted with **ansible-vault** and the **--ask-vault-pass** option omitted to prevent any password prompting (for automated runs) and the file can be encrypted after the run. This can be used if an external system such as Jenkins would handle the decryption/encryption outside of Ansible. Default: none @@ -53,21 +94,24 @@ rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server- Default: none ## Calling This Role -Calling this role requires adding a **vars_prompt**, **pre_tasks**, and **roles** section of a play +Calling this role is done at both **pre_tasks** and **roles** sections of a playbook and optionally a **vars_prompt**. ### vars_prompt -Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable +Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. It also cannot be shown conditionally. For this reason it is not included in this role by default. A better method may be using a file containing the password variable encrypted with **ansible-vault**. See the **rhsm_password** section for more details. -Add a prompt to capture **rhsm_password** +To Add a prompt to capture **rhsm_password**: ``` - hosts: localhost + # Add the following lines after a -hosts: declaration and before pre_tasks: + # Start of vars_prompt code block vars_prompt: - # Unfortunately vars_prompt can only be used at the play level before role tasks, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable - name: "rhsm_password" - prompt: "Subscription Manager password (enter blank if using rhsm_activationkey or to disable registration)" + prompt: "Subscription Manager password" confirm: yes private: yes + # End of vars_prompt code block + pre_tasks: ``` ### pre-tasks @@ -75,8 +119,8 @@ Add a prompt to capture **rhsm_password** A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: ``` - pre_tasks: - - include: roles/subscription-manager/pre_tasks/pre_tasks.yml +pre_tasks: +- include: roles/subscription-manager/pre_tasks/pre_tasks.yml ``` ### roles @@ -84,22 +128,29 @@ A number of variable checks are performed before any tasks to ensure the proper The bulk of the work is performed in the main.yml for this role. 
The pre-task play will set a variable which can be checked to contitionally include this role as such: ``` - roles: - - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } +roles: + - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } ``` -## Running the Playbook +## Running Playbooks with this Role -To register to RHSM Hosted with username and password: +- To register to RHSM Hosted or Satellite with a username and plain text password (NOTE: This may retain your password in your CLI history): -``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez" -``` + ``` + $ ansible-playbook --extra-vars="rhsm_username=vvaldez rhsm_password=my_secret_password " + ``` -To register to a Satellite server with an activation key: +- To register to RHSM Hosted or Satellite with username and an encrypted file containing the password: -``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1" -``` + ``` + $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" + + ``` + +- To register to a Satellite server with an activation key: + + ``` + $ ansible-playbook --extra-vars="rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1 " -To ignore any Subscription Manager activities, simply do not set any parameters. When prompted for the password, hit **Enter** to set a blank password. + ``` +- To ignore any Subscription Manager activities, simply do not set any parameters. diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index 31441785e..8a4d8d06d 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,5 +1,5 @@ --- -- name: Set password fact +- name: "Set password fact" set_fact: rhsm_password: "{{ rhsm_password }}" no_log: true @@ -8,11 +8,11 @@ - rhsm_password is not none - rhsm_password|trim != '' -- name: Initialize Subscription Manager fact +- name: "Initialize Subscription Manager fact" set_fact: rhsm_register: true -- name: Determine if Subscription Manager should be used +- name: "Determine if Subscription Manager should be used" set_fact: rhsm_register: false when: @@ -23,7 +23,7 @@ - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' -- name: Validate Subscription Manager organization is set +- name: "Validate Subscription Manager organization is set" fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" when: - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' @@ -32,14 +32,14 @@ - rhsm_satellite|trim != '' - rhsm_register -- name: Validate Subscription Manager authentication is defined - fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set" +- name: "Validate Subscription Manager authentication is defined" + fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set. 
See the README.md for details on securely prompting for a password" when: - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - rhsm_register -- name: Validate activation key and Hosted are not requested together +- name: "Validate activation key and Hosted are not requested together" fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'" when: - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index adf3a8e85..bdb8ca7c4 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -3,9 +3,7 @@ set_fact: rhsm_password: "{{ hostvars.localhost.rhsm_password }}" when: - - rhsm_password is defined - - rhsm_password is not none - - rhsm_password|trim != '' + - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' - name: "Initializing Subscription Manager authenticaiton method" set_fact: -- cgit v1.2.3 From 305140bfaeb6cd1bbe34279cbd6750d1136816d6 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Sat, 23 Apr 2016 12:50:25 -0500 Subject: Add org parameter to Satellite with user/pass --- roles/subscription-manager/tasks/main.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index bdb8ca7c4..9bc430665 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -77,6 +77,18 @@ when: - not registered - rhsm_authentication == "password" + - rhsm_org is not defined or rhsm_org is none or rhsm_org|trim == '' + +# This can apply to either Hosted or Satellite +- name: "Register using username, password and organization" + command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }} --org={{ rhsm_org }}" + no_log: true + when: + - not registered + - rhsm_authentication == "password" + - rhsm_org is defined + - rhsm_org is not none + - rhsm_org|trim != '' - name: "Auto-attach to Subscription Manager Pool" command: "/usr/bin/subscription-manager attach --auto" -- cgit v1.2.3 From 150b709052688c1cf1ab435c9775501154c7e35a Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Wed, 27 Apr 2016 17:14:42 -0500 Subject: Fix typo in task name --- roles/subscription-manager/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index bdb8ca7c4..f3bd8b656 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -5,7 +5,7 @@ when: - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' -- name: "Initializing Subscription Manager authenticaiton method" +- name: "Initializing Subscription Manager authentication method" set_fact: rhsm_authentication: false -- cgit v1.2.3 From ca1b17aeeb8ed4f4db0a90a11bccd9ea009f9eac Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Fri, 13 May 2016 16:25:19 -0400 Subject: Changes by JayKayy for a full provision of OpenShift on OpenStack --- roles/common/pre_tasks/pre_tasks.yml | 2 +- roles/hostnames/tasks/main.yaml | 17 +++++++++++++++ 
roles/hostnames/templates/records.template.yaml | 28 +++++++++++++++++++++++++ roles/hostnames/test/inv | 12 +++++++++++ roles/hostnames/test/roles | 1 + roles/hostnames/test/test.retry | 3 +++ roles/hostnames/test/test.yaml | 21 +++++++++++++++++++ roles/hostnames/vars/main.yaml | 2 ++ roles/hostnames/vars/records.yaml | 28 +++++++++++++++++++++++++ 9 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 roles/hostnames/tasks/main.yaml create mode 100644 roles/hostnames/templates/records.template.yaml create mode 100644 roles/hostnames/test/inv create mode 120000 roles/hostnames/test/roles create mode 100644 roles/hostnames/test/test.retry create mode 100644 roles/hostnames/test/test.yaml create mode 100644 roles/hostnames/vars/main.yaml create mode 100644 roles/hostnames/vars/records.yaml (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index c573bff8c..9dd14c30c 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -1,4 +1,4 @@ --- - name: Generate Environment ID shell: echo "$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 8)" - register: env_random_id \ No newline at end of file + register: env_random_id diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml new file mode 100644 index 000000000..921cd664b --- /dev/null +++ b/roles/hostnames/tasks/main.yaml @@ -0,0 +1,17 @@ +--- + - name: Setting master(s) hostname + hostname: name="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'openshift_masters' in group_names" + + - name: Setting node(s) hostname + hostname: name="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'openshift_nodes' in group_names" + + - name: "Templating records" + become: false + remote_user: cloud-user + template: + src: "{{ role_path }}/templates/records.template.yaml" + dest: "/tmp/records.yaml" + force: yes + delegate_to: localhost diff --git a/roles/hostnames/templates/records.template.yaml b/roles/hostnames/templates/records.template.yaml new file mode 100644 index 000000000..a916fd2b3 --- /dev/null +++ b/roles/hostnames/templates/records.template.yaml @@ -0,0 +1,28 @@ +--- +dns_records_add: + - view: private + zone: {{ dns_domain }} + entries: +{% for mst in groups['openshift_masters'] %} + - type: A + hostname: {{ hostvars[mst]['ansible_hostname'] }} + ip: {{ hostvars[mst]['dns_private_ip'] }} +{% endfor %} +{% for node in groups['openshift_nodes'] %} + - type: A + hostname: {{ hostvars[node]['ansible_hostname'] }} + ip: {{ hostvars[node]['dns_private_ip'] }} +{% endfor %} + - view: public + zone: {{ dns_domain}} + entries: +{% for mst in groups['openshift_masters']%} + - type: A + hostname: {{ hostvars[mst]['ansible_hostname'] }} + ip: {{ hostvars[mst]['dns_public_ip'] }} +{% endfor %} +{% for node in groups['openshift_nodes'] %} + - type: A + hostname: {{ hostvars[node]['ansible_hostname'] }} + ip: {{ hostvars[node]['dns_public_ip'] }} +{% endfor %} diff --git a/roles/hostnames/test/inv b/roles/hostnames/test/inv new file mode 100644 index 000000000..ffbe6e03d --- /dev/null +++ b/roles/hostnames/test/inv @@ -0,0 +1,12 @@ +[all:vars] +dns_domain=example.com + +[openshift_masters] +192.168.124.41 dns_private_ip=1.1.1.41 
dns_public_ip=192.168.124.41 +192.168.124.117 dns_private_ip=1.1.1.117 dns_public_ip=192.168.124.117 + +[openshift_nodes] +192.168.124.40 dns_private_ip=1.1.1.40 dns_public_ip=192.168.124.40 + +#[dns] +#192.168.124.117 dns_private_ip=1.1.1.117 diff --git a/roles/hostnames/test/roles b/roles/hostnames/test/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/roles/hostnames/test/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/roles/hostnames/test/test.retry b/roles/hostnames/test/test.retry new file mode 100644 index 000000000..63fc08e4c --- /dev/null +++ b/roles/hostnames/test/test.retry @@ -0,0 +1,3 @@ +192.168.124.117 +192.168.124.40 +192.168.124.41 diff --git a/roles/hostnames/test/test.yaml b/roles/hostnames/test/test.yaml new file mode 100644 index 000000000..34bf37942 --- /dev/null +++ b/roles/hostnames/test/test.yaml @@ -0,0 +1,21 @@ +--- +- hosts: all + roles: + - role: hostnames + +# - debug: +# +# - hosts: dns +# roles: +# - role: dns-server +# named_config_views: +# - name: private +# acl_entry: +# - 192.168.124.40/32 +# - 192.168.124.40/32 +# zone: +# - dns_domain: example.com +# - name: public +# zone: +# - dns_domain: example.com +# - role: dns diff --git a/roles/hostnames/vars/main.yaml b/roles/hostnames/vars/main.yaml new file mode 100644 index 000000000..3eecb8dc4 --- /dev/null +++ b/roles/hostnames/vars/main.yaml @@ -0,0 +1,2 @@ +--- +counter: 1 diff --git a/roles/hostnames/vars/records.yaml b/roles/hostnames/vars/records.yaml new file mode 100644 index 000000000..3bf12ae2b --- /dev/null +++ b/roles/hostnames/vars/records.yaml @@ -0,0 +1,28 @@ +--- + - name: "Building Records" + set_fact: + dns_records_add: + - view: private + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 172.16.15.94 + - type: A + hostname: node1.example.com + ip: 172.16.15.86 + - type: A + hostname: node2.example.com + ip: 172.16.15.87 + - view: public + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 10.3.10.116 + - type: A + hostname: node1.example.com + ip: 10.3.11.46 + - type: A + hostname: node2.example.com + ip: 10.3.12.6 -- cgit v1.2.3 From c8f84c0aebe1fe9c00498921c5f83022a2e873c3 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Fri, 3 Jun 2016 14:01:22 -0400 Subject: Changes to allow runs from inside a container. 
Also allows for running upstream openshift-ansible installer

---
 roles/hostnames/tasks/main.yaml | 3 +++
 1 file changed, 3 insertions(+)
(limited to 'roles')

diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml
index 921cd664b..c34d07915 100644
--- a/roles/hostnames/tasks/main.yaml
+++ b/roles/hostnames/tasks/main.yaml
@@ -15,3 +15,6 @@
       dest: "/tmp/records.yaml"
       force: yes
     delegate_to: localhost
+
+  - name: "Updating hostname facts"
+    setup: filter=ansible_hostname
-- cgit v1.2.3


From e4c6ba27a5fe784143831e02e5181794c1b953b2 Mon Sep 17 00:00:00 2001
From: Eric Sauer
Date: Fri, 3 Jun 2016 18:01:05 -0400
Subject: Reverting previous commit and making template adjustments

---
 roles/hostnames/tasks/main.yaml | 8 ++++++++
 1 file changed, 8 insertions(+)
(limited to 'roles')

diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml
index c34d07915..700845e47 100644
--- a/roles/hostnames/tasks/main.yaml
+++ b/roles/hostnames/tasks/main.yaml
@@ -3,10 +3,18 @@
     hostname: name="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}"
     when: "'openshift_masters' in group_names"
 
+  - name: Setting facts for masters
+    set_fact: ansible_hostname="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}"
+    when: "'openshift_masters' in group_names"
+
   - name: Setting node(s) hostname
     hostname: name="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}"
     when: "'openshift_nodes' in group_names"
 
+  - name: Setting facts for nodes
+    set_fact: ansible_hostname="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}"
+    when: "'openshift_nodes' in group_names"
+
   - name: "Templating records"
     become: false
     remote_user: cloud-user
     template:
-- cgit v1.2.3


From d827e1796c6a3705007365cb58aa6b36a92d3b6e Mon Sep 17 00:00:00 2001
From: Eric Sauer
Date: Fri, 3 Jun 2016 19:10:27 -0400
Subject: Subscription manager role should accommodate orgs with spaces

---
 roles/subscription-manager/tasks/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'roles')

diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml
index f3bd8b656..c73204a29 100644
--- a/roles/subscription-manager/tasks/main.yml
+++ b/roles/subscription-manager/tasks/main.yml
@@ -62,7 +62,7 @@
   - rhsm_satellite|trim != ''
 
 - name: "Register to Satellite using activation key"
-  command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}"
+  command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org='{{ rhsm_org }}'"
   when:
   - not registered
   - rhsm_authentication == 'key'
-- cgit v1.2.3


From e2181a706679666a6fff2e2aaca648ed982060bd Mon Sep 17 00:00:00 2001
From: Eric Sauer
Date: Wed, 8 Jun 2016 14:58:36 -0400
Subject: Changing hard coded host groups to match openshift-ansible expected host groups. Importing byo playbook now instead of nested ansible run. Need to refactor how we generate hostnames to make it fit this. 
--- roles/hostnames/tasks/main.yaml | 17 ++++++++--------- roles/hostnames/templates/records.template.yaml | 8 ++++---- 2 files changed, 12 insertions(+), 13 deletions(-) (limited to 'roles') diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index 700845e47..bf2fafb97 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -1,23 +1,22 @@ --- - name: Setting master(s) hostname - hostname: name="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_masters' in group_names" + hostname: name="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'masters' in group_names" - name: Setting facts for masters - set_fact: ansible_hostname="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_masters' in group_names" + set_fact: ansible_hostname="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'masters' in group_names" - name: Setting node(s) hostname - hostname: name="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_nodes' in group_names" + hostname: name="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'nodes' in group_names" - name: Setting facts for nodes - set_fact: ansible_hostname="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_nodes' in group_names" + set_fact: ansible_hostname="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'nodes' in group_names" - name: "Templating records" become: false - remote_user: cloud-user template: src: "{{ role_path }}/templates/records.template.yaml" dest: "/tmp/records.yaml" diff --git a/roles/hostnames/templates/records.template.yaml b/roles/hostnames/templates/records.template.yaml index a916fd2b3..2f2420464 100644 --- a/roles/hostnames/templates/records.template.yaml +++ b/roles/hostnames/templates/records.template.yaml @@ -3,12 +3,12 @@ dns_records_add: - view: private zone: {{ dns_domain }} entries: -{% for mst in groups['openshift_masters'] %} +{% for mst in groups['masters'] %} - type: A hostname: {{ hostvars[mst]['ansible_hostname'] }} ip: {{ hostvars[mst]['dns_private_ip'] }} {% endfor %} -{% for node in groups['openshift_nodes'] %} +{% for node in groups['nodes'] %} - type: A hostname: {{ hostvars[node]['ansible_hostname'] }} ip: {{ hostvars[node]['dns_private_ip'] }} @@ -16,12 +16,12 @@ dns_records_add: - view: public zone: {{ dns_domain}} entries: -{% for mst in groups['openshift_masters']%} +{% for mst in groups['masters']%} - type: A hostname: {{ hostvars[mst]['ansible_hostname'] }} ip: {{ hostvars[mst]['dns_public_ip'] }} {% endfor %} -{% for node in groups['openshift_nodes'] 
%} +{% for node in groups['nodes'] %} - type: A hostname: {{ hostvars[node]['ansible_hostname'] }} ip: {{ hostvars[node]['dns_public_ip'] }} -- cgit v1.2.3 From 4d6eb644d78f4b972154ade3d12c23b28dbe19e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Thu, 9 Jun 2016 11:34:07 -0400 Subject: Updated to run as root rather than cloud-user, for now... --- roles/common/pre_tasks/pre_tasks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 9dd14c30c..ed57a2993 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -1,4 +1,4 @@ --- - name: Generate Environment ID - shell: echo "$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 8)" + shell: echo "$(date +%s)" register: env_random_id -- cgit v1.2.3 From 3866232daed8ce1a48aa2db6f2f6c541e90756ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Fri, 17 Jun 2016 14:48:37 -0400 Subject: Cleande up hostname role to make it more generic --- roles/hostnames/tasks/main.yaml | 43 ++++++++++++------------- roles/hostnames/templates/records.template.yaml | 28 ---------------- 2 files changed, 21 insertions(+), 50 deletions(-) delete mode 100644 roles/hostnames/templates/records.template.yaml (limited to 'roles') diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index bf2fafb97..bb45445f5 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -1,27 +1,26 @@ --- - - name: Setting master(s) hostname - hostname: name="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'masters' in group_names" +- name: Setting Hostname Fact + set_fact: + new_hostname: "{{ custom_hostname | default(inventory_hostname) }}" - - name: Setting facts for masters - set_fact: ansible_hostname="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'masters' in group_names" +- name: Setting FQDN Fact + set_fact: + new_fqdn: "{{ new_hostname }}.{{ dns_domain }}" - - name: Setting node(s) hostname - hostname: name="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'nodes' in group_names" +- name: Setting hostname and DNS domain + hostname: name="{{ new_fqdn }}" - - name: Setting facts for nodes - set_fact: ansible_hostname="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'nodes' in group_names" +- name: Check for cloud.cfg + stat: path=/etc/cloud/cloud.cfg + register: cloud_cfg - - name: "Templating records" - become: false - template: - src: "{{ role_path }}/templates/records.template.yaml" - dest: "/tmp/records.yaml" - force: yes - delegate_to: localhost - - - name: "Updating hostname facts" - setup: filter=ansible_hostname +- name: Prevent cloud-init updates of hostname/fqdn (if applicable) + lineinfile: + dest: /etc/cloud/cloud.cfg + state: present + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ - set_hostname', line: '# - set_hostname' } + - { regexp: '^ - update_hostname', line: '# - update_hostname' } + when: 
cloud_cfg.stat.exists == True diff --git a/roles/hostnames/templates/records.template.yaml b/roles/hostnames/templates/records.template.yaml deleted file mode 100644 index 2f2420464..000000000 --- a/roles/hostnames/templates/records.template.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -dns_records_add: - - view: private - zone: {{ dns_domain }} - entries: -{% for mst in groups['masters'] %} - - type: A - hostname: {{ hostvars[mst]['ansible_hostname'] }} - ip: {{ hostvars[mst]['dns_private_ip'] }} -{% endfor %} -{% for node in groups['nodes'] %} - - type: A - hostname: {{ hostvars[node]['ansible_hostname'] }} - ip: {{ hostvars[node]['dns_private_ip'] }} -{% endfor %} - - view: public - zone: {{ dns_domain}} - entries: -{% for mst in groups['masters']%} - - type: A - hostname: {{ hostvars[mst]['ansible_hostname'] }} - ip: {{ hostvars[mst]['dns_public_ip'] }} -{% endfor %} -{% for node in groups['nodes'] %} - - type: A - hostname: {{ hostvars[node]['ansible_hostname'] }} - ip: {{ hostvars[node]['dns_public_ip'] }} -{% endfor %} -- cgit v1.2.3 From fbf2f35080f666f68994e30174a590b8308b59f3 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Fri, 15 Jul 2016 14:05:13 -0500 Subject: Fixes Issue #163 if rhsm_password is not defined --- roles/subscription-manager/pre_tasks/pre_tasks.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'roles') diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index 8a4d8d06d..b21356cf2 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,12 +1,8 @@ --- - name: "Set password fact" set_fact: - rhsm_password: "{{ rhsm_password }}" + rhsm_password: "{{ rhsm_password | default(None) }}" no_log: true - when: - - rhsm_password is defined - - rhsm_password is not none - - rhsm_password|trim != '' - name: "Initialize Subscription Manager fact" set_fact: -- cgit v1.2.3 From c757fd690d24865ef3b5b9a1b536120299b39a6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Sun, 21 Aug 2016 02:12:53 -0400 Subject: Updated env_id to be a sub-domain + make the logic a bit more flexible --- roles/common/pre_tasks/pre_tasks.yml | 21 +++++++++++++++++++-- roles/hostnames/tasks/main.yaml | 4 ++-- 2 files changed, 21 insertions(+), 4 deletions(-) (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index ed57a2993..1ba1ea55d 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -1,4 +1,21 @@ --- - name: Generate Environment ID - shell: echo "$(date +%s)" - register: env_random_id + set_fact: + env_random_id: "{{ ansible_date_time.epoch }}" + run_once: true + delegate_to: localhost + +- name: Set default Environment ID + set_fact: + default_env_id: "casl-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" + delegate_to: localhost + +- name: Setting Common Facts + set_fact: + env_id: "{{ env_id | default(default_env_id) }}" + delegate_to: localhost + +- name: Updating DNS domain to include env_id (if not empty) + set_fact: + full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' 
+ dns_domain) }}" + delegate_to: localhost diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index bb45445f5..bf142d653 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -1,11 +1,11 @@ --- - name: Setting Hostname Fact set_fact: - new_hostname: "{{ custom_hostname | default(inventory_hostname) }}" + new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" - name: Setting FQDN Fact set_fact: - new_fqdn: "{{ new_hostname }}.{{ dns_domain }}" + new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" - name: Setting hostname and DNS domain hostname: name="{{ new_fqdn }}" -- cgit v1.2.3 From fbda334b6797eb0109cd9c13afb99a47e3916b36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Tue, 15 Nov 2016 22:26:58 -0500 Subject: Fixing ansible impl to work with OSP9 and ansible 2.2 --- roles/subscription-manager/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles') diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 0b3aa351f..2dd14b48e 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -114,7 +114,7 @@ - name: "Enable specified repositories" command: "/usr/bin/subscription-manager repos --enable={{ item }}" - with_items: rhsm_repos + with_items: "{{ rhsm_repos }}" when: - not registered - rhsm_repos is defined -- cgit v1.2.3 From 11b48fe4e237950f9d9e9a0e66d8b15f48be1ea0 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Wed, 21 Dec 2016 10:37:40 -0500 Subject: Openstack heat (#2) * Adding a role to invoke openstack heat * Adding readme * Pulling parameters out to inventory file * start of end-to-end playbook * More enhancements and refactoring to make dynamic inventory the driver for an openshift install * Switching to variable substituted path to config.yaml playbook * Changes to allow defining of number of nodes/infranodes. 
* Added labels to inventory * Start of end-to-end functionality * Enhancements to support openstack heat provisioning * Updating inventory sample to remove some deprecation warnings * Working towards making the secure-registry role 'become' aware * Fixing node labels and removing secure-registry as it's no longer needed * No longer need insecure registry line, as installer will secure our registry * Adjusted dynamic inventory to filter by clusterid * Minor updates to dynamic inventory bug * Adding a refactored sample inventory directory * Refactoring playbooks for better directory structure, and to narrow down host groups * Adding volume mounts to heat template * Moving dns playbooks back to original location * Fixing incorrect file path * Cleaning up inventory samples * One more hostname to clean up * Changing var name * changed openshift-provision to openshift-prep * Adjusting current provision script to avoid breakage by new openstack-heat code --- roles/common/pre_tasks/pre_tasks.yml | 5 + roles/openshift-prep/tasks/main.yml | 4 + roles/openshift-prep/tasks/prerequisites.yml | 36 ++ roles/openstack-stack/README.md | 9 + roles/openstack-stack/files/heat_stack.yaml | 684 +++++++++++++++++++++ roles/openstack-stack/files/heat_stack_server.yaml | 156 +++++ roles/openstack-stack/files/user-data | 13 + roles/openstack-stack/tasks/main.yml | 31 + roles/openstack-stack/test/roles | 1 + roles/openstack-stack/test/stack-create-test.yml | 17 + 10 files changed, 956 insertions(+) create mode 100644 roles/openshift-prep/tasks/main.yml create mode 100644 roles/openshift-prep/tasks/prerequisites.yml create mode 100644 roles/openstack-stack/README.md create mode 100644 roles/openstack-stack/files/heat_stack.yaml create mode 100644 roles/openstack-stack/files/heat_stack_server.yaml create mode 100644 roles/openstack-stack/files/user-data create mode 100644 roles/openstack-stack/tasks/main.yml create mode 120000 roles/openstack-stack/test/roles create mode 100644 roles/openstack-stack/test/stack-create-test.yml (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 1ba1ea55d..71a989b30 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -15,6 +15,11 @@ env_id: "{{ env_id | default(default_env_id) }}" delegate_to: localhost +- name: Set Dynamic Inventory Filters + shell: > + export OS_INV_FILTER_KEY=clusterid && OS_INV_FILTER_VALUE={{ env_id }} + delegate_to: localhost + - name: Updating DNS domain to include env_id (if not empty) set_fact: full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' 
+ dns_domain) }}" diff --git a/roles/openshift-prep/tasks/main.yml b/roles/openshift-prep/tasks/main.yml new file mode 100644 index 000000000..5e484e75f --- /dev/null +++ b/roles/openshift-prep/tasks/main.yml @@ -0,0 +1,4 @@ +--- +# Starting Point for OpenShift Installation and Configuration +- include: prerequisites.yml + tags: [prerequisites] diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml new file mode 100644 index 000000000..1286905f4 --- /dev/null +++ b/roles/openshift-prep/tasks/prerequisites.yml @@ -0,0 +1,36 @@ +--- +- name: "Cleaning yum repositories" + command: "yum clean all" + +- name: "Install required packages" + yum: + name: "{{ item }}" + state: latest + with_items: + - wget + - git + - net-tools + - bind-utils + - bridge-utils + - bash-completion + - atomic-openshift-utils + - vim-enhanced + +- name: "Update all packages (this can take a very long time)" + yum: + name: "*" + state: latest + +- name: "Verify hostname" + shell: hostnamectl status | awk "/Static hostname/"'{ print $3 }' + register: hostname_fqdn + +- name: "Set hostname if required" + hostname: + name: "{{ ansible_fqdn }}" + when: hostname_fqdn.stdout != ansible_fqdn + +- name: "Verify SELinux is enforcing" + fail: + msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" + when: ansible_selinux.config_mode != "enforcing" diff --git a/roles/openstack-stack/README.md b/roles/openstack-stack/README.md new file mode 100644 index 000000000..509c9de6c --- /dev/null +++ b/roles/openstack-stack/README.md @@ -0,0 +1,9 @@ +# Role openstack-stack + +Role for spinning up instances using OpenStack Heat. + +## To Test + +``` +ansible-playbook casl-ansible/roles/openstack-stack/test/stack-create-test.yml +``` diff --git a/roles/openstack-stack/files/heat_stack.yaml b/roles/openstack-stack/files/heat_stack.yaml new file mode 100644 index 000000000..058f7a7ad --- /dev/null +++ b/roles/openstack-stack/files/heat_stack.yaml @@ -0,0 +1,684 @@ +heat_template_version: 2014-10-16 + +description: OpenShift cluster + +parameters: + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + subnet_24_prefix: + type: string + label: subnet /24 prefix + description: /24 subnet prefix of the network of the cluster (dot separated number triplet) + + dns_nameservers: + type: comma_delimited_list + label: DNS nameservers list + description: List of DNS nameservers + + external_net: + type: string + label: External network + description: Name of the external network + default: external + + ssh_public_key: + type: string + label: SSH public key + description: SSH public key + hidden: true + + ssh_incoming: + type: string + label: Source of ssh connections + description: Source of legitimate ssh connections + default: 0.0.0.0/0 + + node_port_incoming: + type: string + label: Source of node port connections + description: Authorized sources targetting node ports + default: 0.0.0.0/0 + + num_etcd: + type: number + label: Number of etcd nodes + description: Number of etcd nodes + + num_masters: + type: number + label: Number of masters + description: Number of masters + + num_nodes: + type: number + label: Number of compute nodes + description: Number of compute nodes + + num_infra: + type: number + label: Number of infrastructure nodes + description: Number of infrastructure nodes + + num_dns: + type: number 
+ label: Number of dns servers + description: Number of dns servers + + etcd_image: + type: string + label: Etcd image + description: Name of the image for the etcd servers + + master_image: + type: string + label: Master image + description: Name of the image for the master servers + + node_image: + type: string + label: Node image + description: Name of the image for the compute node servers + + infra_image: + type: string + label: Infra image + description: Name of the image for the infra node servers + + dns_image: + type: string + label: DNS image + description: Name of the image for the DNS server + + etcd_flavor: + type: string + label: Etcd flavor + description: Flavor of the etcd servers + + master_flavor: + type: string + label: Master flavor + description: Flavor of the master servers + + node_flavor: + type: string + label: Node flavor + description: Flavor of the compute node servers + + infra_flavor: + type: string + label: Infra flavor + description: Flavor of the infra node servers + + dns_flavor: + type: string + label: DNS flavor + description: Flavor of the DNS server + + master_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + app_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + infra_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + dns_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + etcd_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. 
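# (Editorial aside, not part of the patch.) The parameter section that ends above is fed by the
# openstack-stack Ansible role through the os_stack module shown later in this series; a minimal
# sketch of matching role variables, reusing the parameter names defined above (the example
# values themselves are assumptions only):
#   num_masters: 1
#   num_nodes: 2
#   num_infra: 1
#   num_etcd: 0
#   num_dns: 1
#   master_volume_size: 25
#   app_volume_size: 15
#   infra_volume_size: 20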
+ +outputs: + + etcd_names: + description: Name of the etcds + value: { get_attr: [ etcd, name ] } + + etcd_ips: + description: IPs of the etcds + value: { get_attr: [ etcd, private_ip ] } + + etcd_floating_ips: + description: Floating IPs of the etcds + value: { get_attr: [ etcd, floating_ip ] } + + master_names: + description: Name of the masters + value: { get_attr: [ masters, name ] } + + master_ips: + description: IPs of the masters + value: { get_attr: [ masters, private_ip ] } + + master_floating_ips: + description: Floating IPs of the masters + value: { get_attr: [ masters, floating_ip ] } + + node_names: + description: Name of the nodes + value: { get_attr: [ compute_nodes, name ] } + + node_ips: + description: IPs of the nodes + value: { get_attr: [ compute_nodes, private_ip ] } + + node_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ compute_nodes, floating_ip ] } + + infra_names: + description: Name of the nodes + value: { get_attr: [ infra_nodes, name ] } + + infra_ips: + description: IPs of the nodes + value: { get_attr: [ infra_nodes, private_ip ] } + + infra_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ infra_nodes, floating_ip ] } + + dns_name: + description: Name of the DNS + value: + get_attr: + - dns + - name + + dns_floating_ip: + description: Floating IP of the DNS + value: + get_attr: + - dns + - addresses + - str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + - 1 + - addr + +resources: + + net: + type: OS::Neutron::Net + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + + subnet: + type: OS::Neutron::Subnet + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-subnet + params: + cluster_id: { get_param: cluster_id } + network: { get_resource: net } + cidr: + str_replace: + template: subnet_24_prefix.0/24 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + allocation_pools: + - start: + str_replace: + template: subnet_24_prefix.3 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + end: + str_replace: + template: subnet_24_prefix.254 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + dns_nameservers: + - 10.9.48.31 +# - { get_param: dns_nameservers } +# repeat: +# for_each: +# <%nameserver%>: { get_param: dns_nameservers } +# template: <%nameserver%> + + router: + type: OS::Neutron::Router + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-router + params: + cluster_id: { get_param: cluster_id } + external_gateway_info: + network: { get_param: external_net } + + interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: subnet } + +# keypair: +# type: OS::Nova::KeyPair +# properties: +# name: +# str_replace: +# template: openshift-ansible-cluster_id-keypair +# params: +# cluster_id: { get_param: cluster_id } +# public_key: { get_param: ssh_public_key } + + master-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-master-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id OpenShift cluster master + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: 
ssh_incoming } + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8443 + - direction: ingress + protocol: tcp + port_range_min: 8444 + port_range_max: 8444 + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5404 + - direction: ingress + protocol: udp + port_range_min: 5405 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + + etcd-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-etcd-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id etcd cluster + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + remote_mode: remote_group_id + + node-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-node-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id OpenShift cluster nodes + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: { get_param: node_port_incoming } + + infra-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-infra-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id OpenShift infrastructure cluster nodes + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 + + dns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + 
str_replace: + template: openshift-ansible-cluster_id-dns-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id cluster DNS + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: { get_param: node_port_incoming } + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: { get_param: node_port_incoming } + + etcd: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_etcd } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: etcd + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: etcd + image: { get_param: etcd_image } + flavor: { get_param: etcd_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: etcd-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: etcd_volume_size } + depends_on: + - interface + + masters: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_masters } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: master + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: master + image: { get_param: master_image } + flavor: { get_param: master_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: master-secgrp } + - { get_resource: node-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: master_volume_size } + depends_on: + - interface + + compute_nodes: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_nodes } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: subtype-k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: node + subtype: app + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: node + subtype: app + image: { get_param: node_image } + flavor: { get_param: node_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: app_volume_size } + depends_on: + - interface + + infra_nodes: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_infra } + 
resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: subtypek8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: node + subtype: infra + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: node + subtype: infra + image: { get_param: infra_image } + flavor: { get_param: infra_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: infra-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: infra_volume_size } + depends_on: + - interface + + dns: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_dns } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: dns + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: dns + image: { get_param: dns_image } + flavor: { get_param: dns_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: dns-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: dns_volume_size } + depends_on: + - interface + diff --git a/roles/openstack-stack/files/heat_stack_server.yaml b/roles/openstack-stack/files/heat_stack_server.yaml new file mode 100644 index 000000000..978da4f0b --- /dev/null +++ b/roles/openstack-stack/files/heat_stack_server.yaml @@ -0,0 +1,156 @@ +heat_template_version: 2014-10-16 + +description: OpenShift cluster server + +parameters: + + name: + type: string + label: Name + description: Name + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + + subnet: + type: string + label: Subnet ID + description: Subnet resource + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group resources + + floating_network: + type: string + label: Floating network + description: Network to allocate floating IP from + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. 
+ default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 0 + - addr + + floating_ip: + description: Floating IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 1 + - addr + +resources: + + server: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: { get_resource: port } + user_data: { get_file: user-data } + user_data_format: RAW + metadata: + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } + fixed_ips: + - subnet: { get_param: subnet } + security_groups: { get_param: secgrp } + + floating-ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: floating_network } + port_id: { get_resource: port } + + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server } + mountpoint: /dev/sdb diff --git a/roles/openstack-stack/files/user-data b/roles/openstack-stack/files/user-data new file mode 100644 index 000000000..eb65f7cec --- /dev/null +++ b/roles/openstack-stack/files/user-data @@ -0,0 +1,13 @@ +#cloud-config +disable_root: true + +system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + +write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + content: | + Defaults:openshift !requiretty diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml new file mode 100644 index 000000000..c953cb603 --- /dev/null +++ b/roles/openstack-stack/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: create stack + ignore_errors: False + register: stack_create + os_stack: + name: "{{ stack_name }}" + state: present + template: 'roles/openstack-stack/files/heat_stack.yaml' + wait: yes + parameters: + cluster_env: "{{ dns_domain }}" + cluster_id: "{{ stack_name }}" + subnet_24_prefix: "{{ subnet_prefix }}" + dns_nameservers: "{{ dns_nameservers }}" + external_net: "{{ external_network }}" + ssh_public_key: "{{ ssh_public_key }}" + num_etcd: "{{ num_etcd }}" + num_masters: "{{ num_masters }}" + num_nodes: "{{ num_nodes }}" + num_infra: "{{ num_infra }}" + num_dns: "{{ num_dns }}" + etcd_image: "{{ openstack_image }}" + master_image: "{{ openstack_image }}" + node_image: "{{ openstack_image }}" + infra_image: "{{ openstack_image }}" + dns_image: "{{ openstack_image }}" + etcd_flavor: "{{ etcd_flavor }}" + master_flavor: "{{ master_flavor }}" + node_flavor: "{{ node_flavor }}" + infra_flavor: "{{ infra_flavor }}" + dns_flavor: "{{ dns_flavor }}" diff --git a/roles/openstack-stack/test/roles b/roles/openstack-stack/test/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/roles/openstack-stack/test/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git 
a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml new file mode 100644 index 000000000..94e312ee3 --- /dev/null +++ b/roles/openstack-stack/test/stack-create-test.yml @@ -0,0 +1,17 @@ +--- +- hosts: localhost + roles: + - role: openstack-stack + stack_name: test-stack + dns_domain: "{{ openstack_dns_domain }}" + dns_nameservers: "{{ openstack_nameservers }}" + subnet_prefix: "{{ openstack_subnet_prefix }}" + ssh_public_key: "{{ openstack_ssh_public_key }}" + openstack_image: "{{ openstack_default_image_name }}" + etcd_flavor: "{{ openstack_default_flavor }}" + master_flavor: "{{ openstack_default_flavor }}" + node_flavor: "{{ openstack_default_flavor }}" + infra_flavor: "{{ openstack_default_flavor }}" + dns_flavor: "{{ openstack_default_flavor }}" + external_network: "{{ openstack_external_network_name }}" + -- cgit v1.2.3 From 3bf8df1a873785a09bf3c1827bfb5097955c5e44 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Tue, 7 Feb 2017 01:12:58 -0500 Subject: Fixing two significant bugs in the HEAT deployment (#13) --- roles/openstack-stack/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) (limited to 'roles') diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index c953cb603..efee08c0e 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -29,3 +29,6 @@ node_flavor: "{{ node_flavor }}" infra_flavor: "{{ infra_flavor }}" dns_flavor: "{{ dns_flavor }}" + master_volume_size: "{{ master_volume_size }}" + app_volume_size: "{{ app_volume_size }}" + infra_volume_size: "{{ infra_volume_size }}" -- cgit v1.2.3 From fdac6976d4b48c11b8de253ef8afa34af0da8cdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Mon, 20 Feb 2017 12:56:21 -0500 Subject: Ensure DNS configuration has wildcards set for infra nodes (#24) * Ensure DNS configuration has wildcards set for infra nodes * Updated to include all cluster hosts for DNS entries --- roles/common/pre_tasks/pre_tasks.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 71a989b30..06a56605d 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -24,3 +24,13 @@ set_fact: full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' + dns_domain) }}" delegate_to: localhost + +- name: Set the APP domain for OpenShift use + set_fact: + openshift_app_domain: "{{ openshift_app_domain | default('apps') }}" + delegate_to: localhost + +- name: Set the default app domain for routing purposes + set_fact: + openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" + delegate_to: localhost -- cgit v1.2.3 From c90d5323afc575246df2f50e9125069f3c12e81e Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Tue, 25 Apr 2017 23:17:38 -0400 Subject: Stack refactor (#38) * Refactored openstack-stack role to: - Convert static heat template files to ansible templates - Include native ansible groups via openstack metadata. This removes the need for a playbook to map host groups - Some code cleanup * Deleting commentd out code and irrelevant plays * Refactored openstack-stack role to: - Convert static heat template files to ansible templates - Include native ansible groups via openstack metadata. 
This removes the need for a playbook to map host groups - Some code cleanup * Deleting commentd out code and irrelevant plays * Replacing stack parameters with jinja expressions * Updating sample inventory to work with latest dynamic inventory changes * updating inventory with host group mapping. making sync keys optional * Missing cluster_hosts group * Updating to add infra_hosts * Updating inventory per comments from oybed and sabre1041 --- roles/openstack-stack/defaults/main.yml | 10 + roles/openstack-stack/files/heat_stack.yaml | 684 --------------------- roles/openstack-stack/files/heat_stack_server.yaml | 156 ----- roles/openstack-stack/files/user-data | 13 - roles/openstack-stack/tasks/main.yml | 59 +- roles/openstack-stack/templates/heat_stack.yaml.j2 | 551 +++++++++++++++++ .../templates/heat_stack_server.yaml.j2 | 170 +++++ roles/openstack-stack/templates/user_data.j2 | 13 + 8 files changed, 777 insertions(+), 879 deletions(-) create mode 100644 roles/openstack-stack/defaults/main.yml delete mode 100644 roles/openstack-stack/files/heat_stack.yaml delete mode 100644 roles/openstack-stack/files/heat_stack_server.yaml delete mode 100644 roles/openstack-stack/files/user-data create mode 100644 roles/openstack-stack/templates/heat_stack.yaml.j2 create mode 100644 roles/openstack-stack/templates/heat_stack_server.yaml.j2 create mode 100644 roles/openstack-stack/templates/user_data.j2 (limited to 'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml new file mode 100644 index 000000000..8aefe039d --- /dev/null +++ b/roles/openstack-stack/defaults/main.yml @@ -0,0 +1,10 @@ +--- +dns_volume_size: 1 +ssh_ingress_cidr: 0.0.0.0/0 +node_ingress_cidr: 0.0.0.0/0 +num_etcd: 0 +num_masters: 1 +num_nodes: 1 +num_dns: 1 +num_infra: 1 +etcd_volume_size: 2 diff --git a/roles/openstack-stack/files/heat_stack.yaml b/roles/openstack-stack/files/heat_stack.yaml deleted file mode 100644 index 058f7a7ad..000000000 --- a/roles/openstack-stack/files/heat_stack.yaml +++ /dev/null @@ -1,684 +0,0 @@ -heat_template_version: 2014-10-16 - -description: OpenShift cluster - -parameters: - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - subnet_24_prefix: - type: string - label: subnet /24 prefix - description: /24 subnet prefix of the network of the cluster (dot separated number triplet) - - dns_nameservers: - type: comma_delimited_list - label: DNS nameservers list - description: List of DNS nameservers - - external_net: - type: string - label: External network - description: Name of the external network - default: external - - ssh_public_key: - type: string - label: SSH public key - description: SSH public key - hidden: true - - ssh_incoming: - type: string - label: Source of ssh connections - description: Source of legitimate ssh connections - default: 0.0.0.0/0 - - node_port_incoming: - type: string - label: Source of node port connections - description: Authorized sources targetting node ports - default: 0.0.0.0/0 - - num_etcd: - type: number - label: Number of etcd nodes - description: Number of etcd nodes - - num_masters: - type: number - label: Number of masters - description: Number of masters - - num_nodes: - type: number - label: Number of compute nodes - description: Number of compute nodes - - num_infra: - type: number - label: Number of infrastructure nodes - description: Number of infrastructure nodes - - 
num_dns: - type: number - label: Number of dns servers - description: Number of dns servers - - etcd_image: - type: string - label: Etcd image - description: Name of the image for the etcd servers - - master_image: - type: string - label: Master image - description: Name of the image for the master servers - - node_image: - type: string - label: Node image - description: Name of the image for the compute node servers - - infra_image: - type: string - label: Infra image - description: Name of the image for the infra node servers - - dns_image: - type: string - label: DNS image - description: Name of the image for the DNS server - - etcd_flavor: - type: string - label: Etcd flavor - description: Flavor of the etcd servers - - master_flavor: - type: string - label: Master flavor - description: Flavor of the master servers - - node_flavor: - type: string - label: Node flavor - description: Flavor of the compute node servers - - infra_flavor: - type: string - label: Infra flavor - description: Flavor of the infra node servers - - dns_flavor: - type: string - label: DNS flavor - description: Flavor of the DNS server - - master_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - app_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - infra_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - dns_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - etcd_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. 
- -outputs: - - etcd_names: - description: Name of the etcds - value: { get_attr: [ etcd, name ] } - - etcd_ips: - description: IPs of the etcds - value: { get_attr: [ etcd, private_ip ] } - - etcd_floating_ips: - description: Floating IPs of the etcds - value: { get_attr: [ etcd, floating_ip ] } - - master_names: - description: Name of the masters - value: { get_attr: [ masters, name ] } - - master_ips: - description: IPs of the masters - value: { get_attr: [ masters, private_ip ] } - - master_floating_ips: - description: Floating IPs of the masters - value: { get_attr: [ masters, floating_ip ] } - - node_names: - description: Name of the nodes - value: { get_attr: [ compute_nodes, name ] } - - node_ips: - description: IPs of the nodes - value: { get_attr: [ compute_nodes, private_ip ] } - - node_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ compute_nodes, floating_ip ] } - - infra_names: - description: Name of the nodes - value: { get_attr: [ infra_nodes, name ] } - - infra_ips: - description: IPs of the nodes - value: { get_attr: [ infra_nodes, private_ip ] } - - infra_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ infra_nodes, floating_ip ] } - - dns_name: - description: Name of the DNS - value: - get_attr: - - dns - - name - - dns_floating_ip: - description: Floating IP of the DNS - value: - get_attr: - - dns - - addresses - - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - - 1 - - addr - -resources: - - net: - type: OS::Neutron::Net - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - - subnet: - type: OS::Neutron::Subnet - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-subnet - params: - cluster_id: { get_param: cluster_id } - network: { get_resource: net } - cidr: - str_replace: - template: subnet_24_prefix.0/24 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - allocation_pools: - - start: - str_replace: - template: subnet_24_prefix.3 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - end: - str_replace: - template: subnet_24_prefix.254 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - dns_nameservers: - - 10.9.48.31 -# - { get_param: dns_nameservers } -# repeat: -# for_each: -# <%nameserver%>: { get_param: dns_nameservers } -# template: <%nameserver%> - - router: - type: OS::Neutron::Router - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-router - params: - cluster_id: { get_param: cluster_id } - external_gateway_info: - network: { get_param: external_net } - - interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: subnet } - -# keypair: -# type: OS::Nova::KeyPair -# properties: -# name: -# str_replace: -# template: openshift-ansible-cluster_id-keypair -# params: -# cluster_id: { get_param: cluster_id } -# public_key: { get_param: ssh_public_key } - - master-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-master-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift cluster master - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: 
ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: 8443 - port_range_max: 8443 - - direction: ingress - protocol: tcp - port_range_min: 8444 - port_range_max: 8444 - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: tcp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: udp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: tcp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: tcp - port_range_min: 2224 - port_range_max: 2224 - - direction: ingress - protocol: udp - port_range_min: 5404 - port_range_max: 5404 - - direction: ingress - protocol: udp - port_range_min: 5405 - port_range_max: 5405 - - direction: ingress - protocol: tcp - port_range_min: 9090 - port_range_max: 9090 - - etcd-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-etcd-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id etcd cluster - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - remote_mode: remote_group_id - remote_group_id: { get_resource: master-secgrp } - - direction: ingress - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - remote_mode: remote_group_id - - node-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-node-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift cluster nodes - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: { get_param: node_port_incoming } - - infra-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-infra-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift infrastructure cluster nodes - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - direction: ingress - protocol: tcp - port_range_min: 443 - port_range_max: 443 - - dns-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - 
str_replace: - template: openshift-ansible-cluster_id-dns-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id cluster DNS - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: { get_param: node_port_incoming } - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: { get_param: node_port_incoming } - - etcd: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_etcd } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: etcd - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: etcd - image: { get_param: etcd_image } - flavor: { get_param: etcd_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: etcd-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: etcd_volume_size } - depends_on: - - interface - - masters: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_masters } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: master - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: master - image: { get_param: master_image } - flavor: { get_param: master_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: master-secgrp } - - { get_resource: node-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: master_volume_size } - depends_on: - - interface - - compute_nodes: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_nodes } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: subtype-k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: node - subtype: app - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: node - subtype: app - image: { get_param: node_image } - flavor: { get_param: node_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: app_volume_size } - depends_on: - - interface - - infra_nodes: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_infra } - 
resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: subtypek8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: node - subtype: infra - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: node - subtype: infra - image: { get_param: infra_image } - flavor: { get_param: infra_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - - { get_resource: infra-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: infra_volume_size } - depends_on: - - interface - - dns: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_dns } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: dns - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: dns - image: { get_param: dns_image } - flavor: { get_param: dns_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - - { get_resource: dns-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: dns_volume_size } - depends_on: - - interface - diff --git a/roles/openstack-stack/files/heat_stack_server.yaml b/roles/openstack-stack/files/heat_stack_server.yaml deleted file mode 100644 index 978da4f0b..000000000 --- a/roles/openstack-stack/files/heat_stack_server.yaml +++ /dev/null @@ -1,156 +0,0 @@ -heat_template_version: 2014-10-16 - -description: OpenShift cluster server - -parameters: - - name: - type: string - label: Name - description: Name - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - type: - type: string - label: Type - description: Type master or node - - subtype: - type: string - label: Sub-type - description: Sub-type compute or infra for nodes, default otherwise - default: default - - key_name: - type: string - label: Key name - description: Key name of keypair - - image: - type: string - label: Image - description: Name of the image - - flavor: - type: string - label: Flavor - description: Name of the flavor - - net: - type: string - label: Net ID - description: Net resource - - net_name: - type: string - label: Net name - description: Net name - - subnet: - type: string - label: Subnet ID - description: Subnet resource - - secgrp: - type: comma_delimited_list - label: Security groups - description: Security group resources - - floating_network: - type: string - label: Floating network - description: Network to allocate floating IP from - - availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - - volume_size: - type: number - description: Size of the volume to be created. 
- default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - -outputs: - - name: - description: Name of the server - value: { get_attr: [ server, name ] } - - private_ip: - description: Private IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 0 - - addr - - floating_ip: - description: Floating IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 1 - - addr - -resources: - - server: - type: OS::Nova::Server - properties: - name: { get_param: name } - key_name: { get_param: key_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: - - port: { get_resource: port } - user_data: { get_file: user-data } - user_data_format: RAW - metadata: - environment: { get_param: cluster_env } - clusterid: { get_param: cluster_id } - host-type: { get_param: type } - sub-host-type: { get_param: subtype } - - port: - type: OS::Neutron::Port - properties: - network: { get_param: net } - fixed_ips: - - subnet: { get_param: subnet } - security_groups: { get_param: secgrp } - - floating-ip: - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: floating_network } - port_id: { get_resource: port } - - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { get_resource: server } - mountpoint: /dev/sdb diff --git a/roles/openstack-stack/files/user-data b/roles/openstack-stack/files/user-data deleted file mode 100644 index eb65f7cec..000000000 --- a/roles/openstack-stack/files/user-data +++ /dev/null @@ -1,13 +0,0 @@ -#cloud-config -disable_root: true - -system_info: - default_user: - name: openshift - sudo: ["ALL=(ALL) NOPASSWD: ALL"] - -write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - content: | - Defaults:openshift !requiretty diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index efee08c0e..71c7bbe0d 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -1,34 +1,41 @@ --- +- name: create HOT stack template prefix + register: stack_template_pre + tempfile: + state: directory + prefix: casl-ansible + +- name: set template paths + set_fact: + stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" + server_template_path: "{{ stack_template_pre.path }}/server.yaml" + user_data_template_path: "{{ stack_template_pre.path }}/user-data" + +- name: generate HOT stack template from jinja2 template + template: + src: heat_stack.yaml.j2 + dest: "{{ stack_template_path }}" + +- name: generate HOT server template from jinja2 template + template: + src: heat_stack_server.yaml.j2 + dest: "{{ server_template_path }}" + +- name: generate user_data from jinja2 template + template: + src: user_data.j2 + dest: "{{ user_data_template_path }}" + - name: create stack ignore_errors: False register: stack_create os_stack: name: "{{ stack_name }}" state: present - template: 'roles/openstack-stack/files/heat_stack.yaml' + template: "{{ stack_template_path }}" wait: yes - parameters: - cluster_env: "{{ dns_domain }}" - cluster_id: "{{ stack_name }}" - subnet_24_prefix: "{{ subnet_prefix }}" - dns_nameservers: "{{ dns_nameservers }}" - external_net: "{{ external_network }}" - ssh_public_key: "{{ ssh_public_key 
}}" - num_etcd: "{{ num_etcd }}" - num_masters: "{{ num_masters }}" - num_nodes: "{{ num_nodes }}" - num_infra: "{{ num_infra }}" - num_dns: "{{ num_dns }}" - etcd_image: "{{ openstack_image }}" - master_image: "{{ openstack_image }}" - node_image: "{{ openstack_image }}" - infra_image: "{{ openstack_image }}" - dns_image: "{{ openstack_image }}" - etcd_flavor: "{{ etcd_flavor }}" - master_flavor: "{{ master_flavor }}" - node_flavor: "{{ node_flavor }}" - infra_flavor: "{{ infra_flavor }}" - dns_flavor: "{{ dns_flavor }}" - master_volume_size: "{{ master_volume_size }}" - app_volume_size: "{{ app_volume_size }}" - infra_volume_size: "{{ infra_volume_size }}" + +- name: cleanup temp files + file: + path: "{{ stack_template_pre.path }}" + state: absent diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 new file mode 100644 index 000000000..bc9547f66 --- /dev/null +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -0,0 +1,551 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster + +parameters: + +outputs: + + etcd_names: + description: Name of the etcds + value: { get_attr: [ etcd, name ] } + + etcd_ips: + description: IPs of the etcds + value: { get_attr: [ etcd, private_ip ] } + + etcd_floating_ips: + description: Floating IPs of the etcds + value: { get_attr: [ etcd, floating_ip ] } + + master_names: + description: Name of the masters + value: { get_attr: [ masters, name ] } + + master_ips: + description: IPs of the masters + value: { get_attr: [ masters, private_ip ] } + + master_floating_ips: + description: Floating IPs of the masters + value: { get_attr: [ masters, floating_ip ] } + + node_names: + description: Name of the nodes + value: { get_attr: [ compute_nodes, name ] } + + node_ips: + description: IPs of the nodes + value: { get_attr: [ compute_nodes, private_ip ] } + + node_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ compute_nodes, floating_ip ] } + + infra_names: + description: Name of the nodes + value: { get_attr: [ infra_nodes, name ] } + + infra_ips: + description: IPs of the nodes + value: { get_attr: [ infra_nodes, private_ip ] } + + infra_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ infra_nodes, floating_ip ] } + + dns_name: + description: Name of the DNS + value: + get_attr: + - dns + - name + + dns_floating_ip: + description: Floating IP of the DNS + value: + get_attr: + - dns + - addresses + - str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + - 1 + - addr + +resources: + + net: + type: OS::Neutron::Net + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + + subnet: + type: OS::Neutron::Subnet + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-subnet + params: + cluster_id: {{ stack_name }} + network: { get_resource: net } + cidr: + str_replace: + template: subnet_24_prefix.0/24 + params: + subnet_24_prefix: {{ subnet_prefix }} + allocation_pools: + - start: + str_replace: + template: subnet_24_prefix.3 + params: + subnet_24_prefix: {{ subnet_prefix }} + end: + str_replace: + template: subnet_24_prefix.254 + params: + subnet_24_prefix: {{ subnet_prefix }} + dns_nameservers: + {% for nameserver in dns_nameservers %} + - {{ nameserver }} + {% endfor %} + + router: + type: OS::Neutron::Router + properties: + name: + str_replace: + template: 
openshift-ansible-cluster_id-router + params: + cluster_id: {{ stack_name }} + external_gateway_info: + network: {{ external_network }} + + interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: subnet } + +# keypair: +# type: OS::Nova::KeyPair +# properties: +# name: +# str_replace: +# template: openshift-ansible-cluster_id-keypair +# params: +# cluster_id: {{ stack_name }} +# public_key: {{ ssh_public_key }} + + master-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-master-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster master + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8443 + - direction: ingress + protocol: tcp + port_range_min: 8444 + port_range_max: 8444 + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5404 + - direction: ingress + protocol: udp + port_range_min: 5405 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + + etcd-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-etcd-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id etcd cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + remote_mode: remote_group_id + + node-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-node-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + 
remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ node_ingress_cidr }} + + infra-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-infra-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift infrastructure cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 + + dns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-dns-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id cluster DNS + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + + etcd: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_etcd }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: etcd + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: etcds + cluster_id: {{ stack_name }} + type: etcd + image: {{ openstack_image }} + flavor: {{ etcd_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: etcd-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ etcd_volume_size }} + depends_on: + - interface + + masters: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_masters }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: master + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: masters + cluster_id: {{ stack_name }} + type: master + image: {{ openstack_image }} + flavor: {{ master_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: master-secgrp } + - { get_resource: node-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ master_volume_size }} + depends_on: + - interface + + compute_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_nodes }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: subtype-k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: node + subtype: app + cluster_env: 
{{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: nodes + cluster_id: {{ stack_name }} + type: node + subtype: app + node_labels: + region: primary + image: {{ openstack_image }} + flavor: {{ node_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ app_volume_size }} + depends_on: + - interface + + infra_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_infra }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: subtypek8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: node + subtype: infra + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: infra + cluster_id: {{ stack_name }} + type: node + subtype: infra + node_labels: + region: infra + image: {{ openstack_image }} + flavor: {{ infra_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: infra-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ infra_volume_size }} + depends_on: + - interface + + dns: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_dns }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: dns + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: dns + cluster_id: {{ stack_name }} + type: dns + image: {{ openstack_image }} + flavor: {{ dns_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: dns-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ dns_volume_size }} + depends_on: + - interface + diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 new file mode 100644 index 000000000..5851d3b9b --- /dev/null +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -0,0 +1,170 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster server + +parameters: + + name: + type: string + label: Name + description: Name + + group: + type: string + label: Host Group + description: The Primary Ansible Host Group + default: host + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: 
string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + + subnet: + type: string + label: Subnet ID + description: Subnet resource + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group resources + + floating_network: + type: string + label: Floating network + description: Network to allocate floating IP from + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. + default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + node_labels: + type: json + description: OpenShift Node Labels + default: {"region": "default" } + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 0 + - addr + + floating_ip: + description: Floating IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 1 + - addr + +resources: + + server: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: { get_resource: port } + user_data: + get_file: user-data + user_data_format: RAW + metadata: + group: { get_param: group } + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + node_labels: { get_param: node_labels } + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } + fixed_ips: + - subnet: { get_param: subnet } + security_groups: { get_param: secgrp } + + floating-ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: floating_network } + port_id: { get_resource: port } + + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server } + mountpoint: /dev/sdb diff --git a/roles/openstack-stack/templates/user_data.j2 b/roles/openstack-stack/templates/user_data.j2 new file mode 100644 index 000000000..eb65f7cec --- /dev/null +++ b/roles/openstack-stack/templates/user_data.j2 @@ -0,0 +1,13 @@ +#cloud-config +disable_root: true + +system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + +write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + content: | + Defaults:openshift !requiretty -- cgit v1.2.3 From 7304ed4611192f6daa88f84d8b47d3e76514a03b Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Thu, 27 Apr 2017 16:58:41 -0400 Subject: First attempt at a simple multi-master support (#39) * First attempt at a simple multi-master support * Removing unneeded inventory * adding default number of masters and lower number of nodes --- roles/openstack-stack/defaults/main.yml | 2 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 69 ++++++++++++++++++++++ 2 files changed, 71 insertions(+) (limited to 
'roles') diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 8aefe039d..2a4ef3a45 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -2,6 +2,8 @@ dns_volume_size: 1 ssh_ingress_cidr: 0.0.0.0/0 node_ingress_cidr: 0.0.0.0/0 +master_ingress_cidr: 0.0.0.0/0 +lb_ingress_cidr: 0.0.0.0/0 num_etcd: 0 num_masters: 1 num_nodes: 1 diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index bc9547f66..c367aabe7 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -342,6 +342,31 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: {{ node_ingress_cidr }} +{% if num_masters is greaterthan 1 %} + lb-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: openshift-ansible-{{ stack_name }}-lb-secgrp + description: Security group for {{ stack_name }} cluster Load Balancer + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr }} + {% if openshift_master_console_port is defined and openshift_master_console_port is not equalto openshift_master_api_port %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port | default(8443) }} + port_range_max: {{ openshift_master_console_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr }} + {% endif %} +{% endif %} etcd: type: OS::Heat::ResourceGroup @@ -382,6 +407,47 @@ resources: depends_on: - interface +{% if num_masters is greaterthan 1 %} + loadbalancer: + type: OS::Heat::ResourceGroup + properties: + count: 1 + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: lb + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: lb + cluster_id: {{ stack_name }} + type: lb + image: {{ openstack_image }} + flavor: {{ lb_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: lb-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: 5 + depends_on: + - interface +{% endif %} + masters: type: OS::Heat::ResourceGroup properties: @@ -412,6 +478,9 @@ resources: secgrp: - { get_resource: master-secgrp } - { get_resource: node-secgrp } +{% if num_etcd is equalto 0 %} + - { get_resource: etcd-secgrp } +{% endif %} floating_network: {{ external_network }} net_name: str_replace: -- cgit v1.2.3 From 469a88f6d7609df5ffaab812093e0c58baa3be29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Mon, 5 Jun 2017 16:47:13 -0400 Subject: Conditionally set the openshift_master_default_subdomain to avoid overriding it unecessary (#47) --- roles/common/pre_tasks/pre_tasks.yml | 2 ++ 1 file changed, 2 insertions(+) (limited to 'roles') diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 06a56605d..cc4e64a0f 100644 --- a/roles/common/pre_tasks/pre_tasks.yml 
+++ b/roles/common/pre_tasks/pre_tasks.yml
@@ -34,3 +34,5 @@
   set_fact:
     openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}"
   delegate_to: localhost
+  when:
+    - openshift_master_default_subdomain is undefined
-- cgit v1.2.3

From 22e88c9ce8f81cb13c3d050455d332161a1acd83 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=98ystein=20Bedin?=
Date: Tue, 13 Jun 2017 15:35:22 -0400
Subject: Update CASL to use nsupdate for DNS records (#48)

* Updated to use nsupdate for DNS records
* Updated formatting of dict
* Updating descriptive text
* Support for external DNS config
* Upgrading jinja2 to work correctly with latest templates
* Latest update for nsupdate
* Updated to use nsupdate for DNS records
* Updated formatting of dict
* Updating descriptive text
* Support for external DNS config
* Latest update for nsupdate
* Updated to support external public/private DNS server(s)
* Updated DNS server handling
* Updated DNS server handling
* Updated DNS server handling
* Eliminated the from the sample inventories
* Updated sample inventory to point to 2 separate DNS servers for private/public
* Playbook clean-up
* Adding 'python-dns'
* splitting subscription manager calls to allow for a clean pre-install playbook
---
 roles/common/pre_tasks/pre_tasks.yml               |  2 +-
 roles/dns-server-detect/defaults/main.yml          |  3 ++
 roles/dns-server-detect/tasks/main.yml             | 38 ++++++++++++++++++++++
 roles/openstack-stack/templates/heat_stack.yaml.j2 | 12 +++----
 roles/openstack-stack/test/stack-create-test.yml   |  4 +--
 5 files changed, 50 insertions(+), 9 deletions(-)
 create mode 100644 roles/dns-server-detect/defaults/main.yml
 create mode 100644 roles/dns-server-detect/tasks/main.yml
(limited to 'roles')

diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml
index cc4e64a0f..c5e79e89c 100644
--- a/roles/common/pre_tasks/pre_tasks.yml
+++ b/roles/common/pre_tasks/pre_tasks.yml
@@ -22,7 +22,7 @@
 
 - name: Updating DNS domain to include env_id (if not empty)
   set_fact:
-    full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' + dns_domain) }}"
+    full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' + public_dns_domain) }}"
   delegate_to: localhost
 
 - name: Set the APP domain for OpenShift use
diff --git a/roles/dns-server-detect/defaults/main.yml b/roles/dns-server-detect/defaults/main.yml
new file mode 100644
index 000000000..58bd861cd
--- /dev/null
+++ b/roles/dns-server-detect/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+
+external_nsupdate_keys: {}
diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml
new file mode 100644
index 000000000..e8dd0acf0
--- /dev/null
+++ b/roles/dns-server-detect/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+
+- fail:
+    msg: 'Missing required private DNS server(s)'
+  when:
+    - external_nsupdate_keys['private'] is undefined
+    - hostvars[groups['dns'][0]] is undefined
+
+- fail:
+    msg: 'Missing required public DNS server(s)'
+  when:
+    - external_nsupdate_keys['public'] is undefined
+    - hostvars[groups['dns'][0]] is undefined
+
+- name: "Set the private DNS server to use the external value (if provided)"
+  set_fact:
+    private_dns_server: "{{ external_nsupdate_keys['private']['server'] }}"
+  when:
+    - external_nsupdate_keys['private'] is defined
+
+- name: "Set the private DNS server to use the provisioned value"
+  set_fact:
+    private_dns_server: "{{ hostvars[groups['dns'][0]].openstack.private_v4 }}"
+  when:
+    - private_dns_server is undefined
+
+- name: "Set the public DNS server to use the external value (if provided)"
+  set_fact:
+    public_dns_server: "{{ external_nsupdate_keys['public']['server'] }}"
+  when:
+    - external_nsupdate_keys['public'] is defined
+
+- name: "Set the public DNS server to use the provisioned value"
+  set_fact:
+    public_dns_server: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}"
+  when:
+    - public_dns_server is undefined
+
diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2
index bc9547f66..09b62cba7 100644
--- a/roles/openstack-stack/templates/heat_stack.yaml.j2
+++ b/roles/openstack-stack/templates/heat_stack.yaml.j2
@@ -381,7 +381,7 @@ resources:
               params:
                 cluster_id: {{ stack_name }}
                 k8s_type: etcd
-          cluster_env: {{ dns_domain }}
+          cluster_env: {{ public_dns_domain }}
           cluster_id: {{ stack_name }}
           group:
             str_replace:
@@ -421,7 +421,7 @@
               params:
                 cluster_id: {{ stack_name }}
                 k8s_type: lb
-          cluster_env: {{ dns_domain }}
+          cluster_env: {{ public_dns_domain }}
           cluster_id: {{ stack_name }}
           group:
             str_replace:
@@ -461,7 +461,7 @@
               params:
                 cluster_id: {{ stack_name }}
                 k8s_type: master
-          cluster_env: {{ dns_domain }}
+          cluster_env: {{ public_dns_domain }}
           cluster_id: {{ stack_name }}
           group:
             str_replace:
@@ -505,7 +505,7 @@
                 cluster_id: {{ stack_name }}
                 k8s_type: node
                 subtype: app
-          cluster_env: {{ dns_domain }}
+          cluster_env: {{ public_dns_domain }}
           cluster_id: {{ stack_name }}
           group:
             str_replace:
@@ -548,7 +548,7 @@
                 cluster_id: {{ stack_name }}
                 k8s_type: node
                 subtype: infra
-          cluster_env: {{ dns_domain }}
+          cluster_env: {{ public_dns_domain }}
           cluster_id: {{ stack_name }}
           group:
             str_replace:
@@ -591,7 +591,7 @@
               params:
                 cluster_id: {{ stack_name }}
                 k8s_type: dns
-          cluster_env: {{ dns_domain }}
+          cluster_env: {{ public_dns_domain }}
           cluster_id: {{ stack_name }}
           group:
             str_replace:
diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml
index 94e312ee3..6cbd7ff30 100644
--- a/roles/openstack-stack/test/stack-create-test.yml
+++ b/roles/openstack-stack/test/stack-create-test.yml
@@ -3,8 +3,8 @@
   roles:
     - role: openstack-stack
       stack_name: test-stack
-      dns_domain: "{{ openstack_dns_domain }}"
-      dns_nameservers: "{{ openstack_nameservers }}"
+      dns_domain: "{{ public_dns_domain }}"
+      dns_nameservers: "{{ public_dns_nameservers }}"
       subnet_prefix: "{{ openstack_subnet_prefix }}"
       ssh_public_key: "{{ openstack_ssh_public_key }}"
       openstack_image: "{{ openstack_default_image_name }}"
-- cgit v1.2.3
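
The dns-server-detect role above only selects which private and public DNS servers to target; the records themselves are pushed with dynamic DNS updates, which is why the commit message mentions adding 'python-dns'. As a rough, non-authoritative illustration of how the selected server and the facts from roles/common could be consumed, the sketch below uses Ansible's nsupdate module. The zone, TSIG key name, key secret and IP address are placeholders invented for the example; only public_dns_server, openshift_app_domain and full_dns_domain come from the patches above.

```
# Illustrative only -- not part of the patch series above.
# Publishes a wildcard record for the OpenShift app domain on the DNS server
# chosen by dns-server-detect. Zone, key material and the IP are placeholders.
- name: Publish wildcard app record via dynamic DNS update
  nsupdate:
    server: "{{ public_dns_server }}"
    zone: "example.com"                          # placeholder zone
    record: "*.{{ openshift_app_domain }}.{{ full_dns_domain }}."
    type: "A"
    value: "192.0.2.10"                          # placeholder router/infra node IP
    key_name: "update-key"                       # placeholder TSIG key name
    key_secret: "{{ nsupdate_key_secret }}"      # placeholder, keep it in a vault
    key_algorithm: "hmac-md5"
    state: present
  delegate_to: localhost
```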
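
The multi-master patch earlier in this series only takes effect when num_masters is greater than 1, and it reads a handful of extra variables. A minimal, illustrative set of group_vars for that path might look like the following; the variable names are taken from the diffs above, while the values are examples only.

```
# Example group_vars for the multi-master path (values are illustrative).
num_masters: 3
num_etcd: 0                        # with no dedicated etcd nodes, masters also get etcd-secgrp
num_nodes: 2
num_infra: 1
lb_flavor: m1.medium               # consumed by the loadbalancer resource group
lb_ingress_cidr: 0.0.0.0/0         # who may reach the API/console through the load balancer
master_ingress_cidr: 0.0.0.0/0     # added to the role defaults by the same patch
openshift_master_api_port: 8443
openshift_master_console_port: 8443
```

Note that the lb-secgrp template only adds a second ingress rule when openshift_master_console_port is defined and differs from openshift_master_api_port, so keeping both on 8443 results in a single rule.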
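
Once the openstack-stack role has created the stack, the outputs declared at the top of heat_stack.yaml.j2 (master_floating_ips, node_ips, dns_floating_ip, and so on) are the easiest way to confirm what was provisioned. A possible spot-check, assuming python-openstackclient with the Heat plugin is available on the control host, could look like this:

```
# Illustrative spot-check of the Heat outputs defined in heat_stack.yaml.j2.
- name: Read master floating IPs from the stack outputs
  command: openstack stack output show {{ stack_name }} master_floating_ips -f json
  register: master_fips_raw
  changed_when: false

- name: Show the parsed output value
  debug:
    msg: "{{ (master_fips_raw.stdout | from_json).output_value }}"
```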