-- cgit v1.2.3

From 636510c3eec7317acdfded00d6237ed5f6ff3529 Mon Sep 17 00:00:00 2001
From: Andrew Block
Date: Mon, 8 Feb 2016 00:10:01 -0600
Subject: New OSE3 docker host builder and OpenStack ansible provisioning support

---
 roles/common/pre_tasks/pre_tasks.yml | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 roles/common/pre_tasks/pre_tasks.yml

diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml
new file mode 100644
index 000000000..c573bff8c
--- /dev/null
+++ b/roles/common/pre_tasks/pre_tasks.yml
@@ -0,0 +1,4 @@
+---
+- name: Generate Environment ID
+  shell: echo "$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 8)"
+  register: env_random_id
\ No newline at end of file
-- cgit v1.2.3

From 80c3d3332507fe620fcab99e65f2ffd81d48a69e Mon Sep 17 00:00:00 2001
From: Vinny Valdez
Date: Tue, 29 Mar 2016 16:52:05 -0500
Subject: Add subscription-manager support for Hosted or Satellite

---
 roles/subscription-manager/README.md               | 95 ++++++++++++++++++++++
 roles/subscription-manager/pre_tasks/pre_tasks.yml | 37 +++++++++
 roles/subscription-manager/tasks/main.yml          | 93 +++++++++++++++++++++
 3 files changed, 225 insertions(+)
 create mode 100644 roles/subscription-manager/README.md
 create mode 100644 roles/subscription-manager/pre_tasks/pre_tasks.yml
 create mode 100644 roles/subscription-manager/tasks/main.yml

diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md
new file mode 100644
index 000000000..b140ad09a
--- /dev/null
+++ b/roles/subscription-manager/README.md
@@ -0,0 +1,95 @@
+# Red Hat Subscription Manager Ansible Role
+
+## Parameters
+
+This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. The variables are:
+
+### rhsm_method
+
+Subscription Manager method to use for registration. Valid values are:
+
+* **satellite** - Use a Satellite server. Additional variables required include **rhsm_server**, **rhsm_org** and either (**rhsm_username** and **rhsm_password**) or **rhsm_activationkey**
+* **hosted** - Use Red Hat's CDN. Additional variables required are **rhsm_server** (defaults to RHSM CDN) and **rhsm_username** and **rhsm_password**
+* none/false/blank will disable any subscription manager activities (this is the default if no parameters are set)
+
+Default: none
+
+### rhsm_server
+
+Subscription Manager server hostname. If using a Satellite server set the FQDN here. If using RHSM Hosted this value is ignored.
+
+Default: none
+
+### rhsm_username
+
+Subscription Manager username. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this.
+
+Default: none
+
+### rhsm_password
+
+Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this.
+
+Default: none
+
+### rhsm_org
+
+Optional Satellite Subscription Manager Organization. Required for Satellite, ignored if using RHSM Hosted.
+ +Default: none + +### rhsm_activationkey + +Optional Satellite Subscription Manager Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. + +Default: none + +### rhsm_pool + +Optional Subscription Manager pool, determine this by running **subscription-manager list --available** on a registered system. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** will ignore this option. + +Default: none + +### rhsm_repos + +Optional Repositories to enable, this can also be specified in the **rhsm_activationkey**. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** will ignore this option. + +NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such: + +rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server-extras-rpms"]' + +Default: none + +## Pre-tasks + +A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: + +``` + pre_tasks: + - include: roles/subscription-manager/pre_tasks/pre_tasks.yml +``` + +## Tasks + +The bulk of the work is performed in the main.yml for this role. The pre-task play will set a variable which can be checked to contitionally include this role as such: + +``` + roles: + - { role: subscription-manager, when: not hostvars.localhost.rhsm_skip, tags: 'subscription-manager' } +``` + +## Running the Playbook + +To register to RHSM Hosted with username and password: + +``` +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_method='hosted' rhsm_username=vvaldez rhsm_password='hunter2' openstack_key_name='vvaldez'" +``` + +To register to a Satellite server with an activation key: + +``` +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_server='10.12.32.1' rhsm_org='cloud_practice' rhsm_activationkey='rhel-7-ose-3-1' openstack_key_name='vvaldez' rhsm_method='satellite'" +``` + +To ignore any Subscription Manager activities, simple do not set any parameters or explicitly set **rhsm_method** to false. 
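Pulled together, the pre_tasks include and the conditional role include documented above wire into a playbook roughly as follows. This is a minimal sketch, assuming the rhsm_* variables are supplied via the inventory, group_vars or --extra-vars as described above, and assuming a cluster_hosts group name; the layout of the actual ose-provision.yml may differ:

```
- hosts: localhost
  pre_tasks:
    # Validates the rhsm_* parameters and sets the rhsm_skip fact used below
    - include: roles/subscription-manager/pre_tasks/pre_tasks.yml

- hosts: cluster_hosts
  roles:
    - { role: subscription-manager, when: not hostvars.localhost.rhsm_skip, tags: 'subscription-manager' }
```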
diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml new file mode 100644 index 000000000..497f39353 --- /dev/null +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -0,0 +1,37 @@ +--- +- name: Initialize Subscription Manager fact + set_fact: + rhsm_skip: false + +- name: Determine if Subscription Manager should be skipped or not + set_fact: + rhsm_skip: true + when: rhsm_method is undefined or rhsm_method is none or rhsm_method|trim == '' + +- name: Determine Subscription Manager method + fail: msg="Value for 'rhsm_method' of '{{ rhsm_method }}' is not valid, it should be one of 'hosted', 'satellite', or false/none/blank" + when: + - rhsm_method != 'hosted' and rhsm_method != 'satellite' + - not rhsm_skip + +- name: Validate Subscription Manager host is set + fail: msg="Cannot determine Subscription Manager server hostname without a value for 'rhsm_server'" + when: + - rhsm_server is undefined or rhsm_server is none or rhsm_server|trim == '' + - not rhsm_method == 'hosted' + - not rhsm_skip + +- name: Validate Subscription Manager organization is set + fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" + when: + - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' + - rhsm_method == 'satellite' + - not rhsm_skip + +- name: Validate Subscription Manager authentication is defined + fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set" + when: + - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') + - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' + - not rhsm_skip + diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml new file mode 100644 index 000000000..2e04a7a22 --- /dev/null +++ b/roles/subscription-manager/tasks/main.yml @@ -0,0 +1,93 @@ +--- +- name: Initializing Subscription Manager authenticaiton method + set_fact: + rhsm_authentication: false + +# 'rhsm_activationkey' will take precedence even if 'rhsm_username' and 'rhsm_password' are also set +- name: Setting Subscription Manager Activation Key Fact + set_fact: + rhsm_authentication: "key" + when: + - rhsm_activationkey is defined + - rhsm_activationkey is not none + - rhsm_activationkey|trim != '' + - not rhsm_authentication + +# If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password +- name: Setting Subscription Manager Username and Password Fact + set_fact: + rhsm_authentication: "password" + when: + - rhsm_username is defined and rhsm_username is not none and rhsm_username|trim != '' + - rhsm_password is defined and rhsm_password is not none and rhsm_password|trim != '' + - not rhsm_authentication + +- name: Initializing registration status + set_fact: + registered: false + +- name: Checking subscription status (a failure means it is not registered and will be) + command: "/usr/bin/subscription-manager status" + ignore_errors: yes + changed_when: no + register: check_if_registered + +- name: Set registration fact + set_fact: + registered: true + when: check_if_registered.rc == 0 + +- name: Cleaning any old subscriptions + command: "/usr/bin/subscription-manager clean" + when: + - not registered + - rhsm_authentication is defined + +- name: Install Satellite certificate 
+ command: "rpm -Uvh --force http://{{ rhsm_server }}/pub/katello-ca-consumer-latest.noarch.rpm" + when: + - not registered + - rhsm_method == 'satellite' + +- name: Register to Satellite using activation key + command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}" + when: + - not registered + - rhsm_authentication == 'key' + - rhsm_method == 'satellite' + +# This can apply to either Hosted or Satellite +- name: Register using username and password + command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" + when: + - not registered + - rhsm_authentication != "key" + +- name: Auto-attach to Subscription Manager Pool + command: "/usr/bin/subscription-manager attach --auto" + when: + - not registered + - rhsm_authentication != "key" + +- name: Attach to a specific pool + command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" + when: + - rhsm_pool is defined and rhsm_pool is not none and rhsm_pool|trim != '' + - and not registered + - rhsm_authentication != "key" + +- name: Disable all repositories + command: "/usr/bin/subscription-manager repos --disable=*" + when: + - not registered + - not rhsm_authentication == "key" + +- name: Enable specified repositories + command: "/usr/bin/subscription-manager repos --enable={{ item }}" + with_items: rhsm_repos + when: + - not registered + - not rhsm_authentication == "key" + +- name: Cleaning yum repositories + command: "yum clean all" -- cgit v1.2.3 From 177950b76a185c20317aa0e89d356cdf8b97c4c3 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Wed, 30 Mar 2016 15:46:31 -0500 Subject: Refactor role to dynamically determine rhsm_method * Removes rhsm_method * Renames rhsm_server to rhsm_satellite * Add additional pre_task checks (hosted + key) * Change conditionals from rhsm_method check to rhsm_satellite defined * Change repos disable/enable from key to if repos are defined * Update README and examples in inventory file --- roles/subscription-manager/README.md | 30 ++++++---------- roles/subscription-manager/pre_tasks/pre_tasks.yml | 41 ++++++++++++---------- roles/subscription-manager/tasks/main.yml | 20 +++++++---- 3 files changed, 46 insertions(+), 45 deletions(-) diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md index b140ad09a..e604c7475 100644 --- a/roles/subscription-manager/README.md +++ b/roles/subscription-manager/README.md @@ -2,21 +2,11 @@ ## Parameters -This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. The variables are: +This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. No values are set by default which disables this role. The variables are: -### rhsm_method +### rhsm_satellite -Subscription Manager method to use for registration. Valid values are: - -* **satellite** - Use a Satellite server. Additional variables required include **rhsm_server**, **rhsm_org** and either (**rhsm_username** and **rhsm_password**) or **rhsm_activationkey** -* **hosted** - Use Red Hat's CDN. Additional variables required are **rhsm_server** (defaults to RHSM CDN) and **rhsm_username** and **rhsm_password** -* none/false/blank will disable any subscription manager activities (this is the default if no parameters are set) - -Default: none - -### rhsm_server - -Subscription Manager server hostname. 
If using a Satellite server set the FQDN here. If using RHSM Hosted this value is ignored. +Subscription Manager server hostname. If using a Satellite server set the FQDN here. If using RHSM Hosted this value must be left blank, none or false. Default: none @@ -34,13 +24,13 @@ Default: none ### rhsm_org -Optional Satellite Subscription Manager Organization. Required for Satellite, ignored if using RHSM Hosted. +Optional Subscription Manager Satellite Organization. Required for Satellite, ignored if using RHSM Hosted. Default: none ### rhsm_activationkey -Optional Satellite Subscription Manager Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. +Optional Subscription Manager Satellite Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. Default: none @@ -52,7 +42,7 @@ Default: none ### rhsm_repos -Optional Repositories to enable, this can also be specified in the **rhsm_activationkey**. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** will ignore this option. +Optional list of repositories to enable. If left blank it is expected that the **rhsm_activationkey** will specify repos instead. If populated, a **subscription-manager repos --disable=\*** will be run and each of the specified repos explicitly enabled. Valid for RHSM Hosted or Satellite NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such: @@ -75,7 +65,7 @@ The bulk of the work is performed in the main.yml for this role. The pre-task pl ``` roles: - - { role: subscription-manager, when: not hostvars.localhost.rhsm_skip, tags: 'subscription-manager' } + - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } ``` ## Running the Playbook @@ -83,13 +73,13 @@ The bulk of the work is performed in the main.yml for this role. The pre-task pl To register to RHSM Hosted with username and password: ``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_method='hosted' rhsm_username=vvaldez rhsm_password='hunter2' openstack_key_name='vvaldez'" +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez rhsm_password=hunter2" ``` To register to a Satellite server with an activation key: ``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_server='10.12.32.1' rhsm_org='cloud_practice' rhsm_activationkey='rhel-7-ose-3-1' openstack_key_name='vvaldez' rhsm_method='satellite'" +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1" ``` -To ignore any Subscription Manager activities, simple do not set any parameters or explicitly set **rhsm_method** to false. +To ignore any Subscription Manager activities, simply do not set any parameters. 
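As an alternative to the quoted-list inventory form shown above, the same variables can live in group_vars; a small sketch using the README's own example values (the group_vars/all.yml file name is an assumption):

```
# group_vars/all.yml (illustrative values only)
rhsm_satellite: satellite.example.com
rhsm_org: example_org
rhsm_activationkey: rhel-7-ose-3-1
rhsm_repos:
  - rhel-7-server-rpms
  - rhel-7-server-ose-3.1-rpms
  - rhel-7-server-extras-rpms
```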
diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index 497f39353..dcd56b2b9 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,37 +1,40 @@ --- - name: Initialize Subscription Manager fact set_fact: - rhsm_skip: false + rhsm_register: true -- name: Determine if Subscription Manager should be skipped or not +- name: Determine if Subscription Manager should be used set_fact: - rhsm_skip: true - when: rhsm_method is undefined or rhsm_method is none or rhsm_method|trim == '' - -- name: Determine Subscription Manager method - fail: msg="Value for 'rhsm_method' of '{{ rhsm_method }}' is not valid, it should be one of 'hosted', 'satellite', or false/none/blank" - when: - - rhsm_method != 'hosted' and rhsm_method != 'satellite' - - not rhsm_skip - -- name: Validate Subscription Manager host is set - fail: msg="Cannot determine Subscription Manager server hostname without a value for 'rhsm_server'" + rhsm_register: false when: - - rhsm_server is undefined or rhsm_server is none or rhsm_server|trim == '' - - not rhsm_method == 'hosted' - - not rhsm_skip + - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' + - rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '' + - rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '' + - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' + - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' + - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - name: Validate Subscription Manager organization is set fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" when: - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' - - rhsm_method == 'satellite' - - not rhsm_skip + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' + - rhsm_register - name: Validate Subscription Manager authentication is defined fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set" when: - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - - not rhsm_skip + - rhsm_register +- name: Validate activation key and Hosted are not requested together + fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'" + when: + - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' + - rhsm_activationkey is defined + - rhsm_activationkey is not none + - rhsm_activationkey|trim != '' + - rhsm_register diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 2e04a7a22..78ceaccd1 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -32,7 +32,7 @@ changed_when: no register: check_if_registered -- name: Set registration fact +- name: Set registration fact if system is already registered set_fact: registered: true when: check_if_registered.rc == 0 @@ -44,17 +44,21 @@ - rhsm_authentication is defined - name: Install Satellite certificate - command: "rpm -Uvh --force http://{{ rhsm_server 
}}/pub/katello-ca-consumer-latest.noarch.rpm" + command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" when: - not registered - - rhsm_method == 'satellite' + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' - name: Register to Satellite using activation key command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}" when: - not registered - rhsm_authentication == 'key' - - rhsm_method == 'satellite' + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite - name: Register using username and password @@ -80,14 +84,18 @@ command: "/usr/bin/subscription-manager repos --disable=*" when: - not registered - - not rhsm_authentication == "key" + - rhsm_repos is defined + - rhsm_repos is not none + - rhsm_repos|trim != '' - name: Enable specified repositories command: "/usr/bin/subscription-manager repos --enable={{ item }}" with_items: rhsm_repos when: - not registered - - not rhsm_authentication == "key" + - rhsm_repos is defined + - rhsm_repos is not none + - rhsm_repos|trim != '' - name: Cleaning yum repositories command: "yum clean all" -- cgit v1.2.3 From 644f1e672c80bd10f34fabafcfe805c306e77b5e Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Tue, 5 Apr 2016 12:23:35 -0500 Subject: Fix bad syntax with extra 'and' in when using rhsm_pool --- roles/subscription-manager/tasks/main.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 78ceaccd1..414bf8f7a 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -1,4 +1,5 @@ --- + - name: Initializing Subscription Manager authenticaiton method set_fact: rhsm_authentication: false @@ -61,6 +62,7 @@ - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite + - name: Register using username and password command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" when: @@ -72,12 +74,15 @@ when: - not registered - rhsm_authentication != "key" + - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - name: Attach to a specific pool command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" when: - - rhsm_pool is defined and rhsm_pool is not none and rhsm_pool|trim != '' - - and not registered + - rhsm_pool is defined + - rhsm_pool is not none + - rhsm_pool|trim != '' + - not registered - rhsm_authentication != "key" - name: Disable all repositories -- cgit v1.2.3 From 96aaa6df25774e05cda3e4a6f73b030ae989100a Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Tue, 5 Apr 2016 18:17:36 -0500 Subject: Refactor use of rhsm_password to prevent display to CLI --- roles/subscription-manager/README.md | 30 ++++++++++++++++++---- roles/subscription-manager/pre_tasks/pre_tasks.yml | 9 +++++++ roles/subscription-manager/tasks/main.yml | 23 ++++++++++++----- 3 files changed, 50 insertions(+), 12 deletions(-) diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md index e604c7475..a5dd1ac44 100644 --- a/roles/subscription-manager/README.md +++ b/roles/subscription-manager/README.md @@ -18,7 +18,9 @@ Default: none ### rhsm_password -Subscription Manager password. Required for RHSM Hosted. 
Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. +Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. + +NOTE: This variable is prompted for at the start of the playbook run. This is for security purposes so the password is not left in the command history. If specified on the command-line or set in a variable file it will be ignored and the value captured from the prompt will overwrite it instead. Default: none @@ -50,7 +52,25 @@ rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server- Default: none -## Pre-tasks +## Calling This Role +Calling this role requires adding a **vars_prompt**, **pre_tasks**, and **roles** section of a play + +### vars_prompt +Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable + +Add a prompt to capture **rhsm_password** + +``` +- hosts: localhost + vars_prompt: + # Unfortunately vars_prompt can only be used at the play level before role tasks, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable + - name: "rhsm_password" + prompt: "Subscription Manager password (enter blank if using rhsm_activationkey or to disable registration)" + confirm: yes + private: yes +``` + +### pre-tasks A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: @@ -59,7 +79,7 @@ A number of variable checks are performed before any tasks to ensure the proper - include: roles/subscription-manager/pre_tasks/pre_tasks.yml ``` -## Tasks +### roles The bulk of the work is performed in the main.yml for this role. The pre-task play will set a variable which can be checked to contitionally include this role as such: @@ -73,7 +93,7 @@ The bulk of the work is performed in the main.yml for this role. The pre-task pl To register to RHSM Hosted with username and password: ``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez rhsm_password=hunter2" +ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez" ``` To register to a Satellite server with an activation key: @@ -82,4 +102,4 @@ To register to a Satellite server with an activation key: ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1" ``` -To ignore any Subscription Manager activities, simply do not set any parameters. +To ignore any Subscription Manager activities, simply do not set any parameters. When prompted for the password, hit **Enter** to set a blank password. 
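The role reads the prompted value back through hostvars.localhost, which assumes the vars_prompt shown above sits on a localhost play that runs before the play applying the role; a rough sketch of that localhost play, with the later play then using the documented when: hostvars.localhost.rhsm_register condition:

```
- hosts: localhost
  vars_prompt:
    - name: "rhsm_password"
      prompt: "Subscription Manager password (enter blank if using rhsm_activationkey or to disable registration)"
      confirm: yes
      private: yes
  pre_tasks:
    # The pre_tasks also copy rhsm_password into a fact with no_log enabled
    - include: roles/subscription-manager/pre_tasks/pre_tasks.yml
```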
diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index dcd56b2b9..31441785e 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,4 +1,13 @@ --- +- name: Set password fact + set_fact: + rhsm_password: "{{ rhsm_password }}" + no_log: true + when: + - rhsm_password is defined + - rhsm_password is not none + - rhsm_password|trim != '' + - name: Initialize Subscription Manager fact set_fact: rhsm_register: true diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 414bf8f7a..6e51be7e4 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -1,5 +1,12 @@ --- - +- name: Initialize rhsm_password variable if vars_prompt was used + set_fact: + rhsm_password: "{{ hostvars.localhost.rhsm_password }}" + when: + - rhsm_password is defined + - rhsm_password is not none + - rhsm_password|trim != '' + - name: Initializing Subscription Manager authenticaiton method set_fact: rhsm_authentication: false @@ -19,8 +26,12 @@ set_fact: rhsm_authentication: "password" when: - - rhsm_username is defined and rhsm_username is not none and rhsm_username|trim != '' - - rhsm_password is defined and rhsm_password is not none and rhsm_password|trim != '' + - rhsm_username is defined + - rhsm_username is not none + - rhsm_username|trim != '' + - rhsm_password is defined + - rhsm_password is not none + - rhsm_password|trim != '' - not rhsm_authentication - name: Initializing registration status @@ -62,18 +73,17 @@ - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite - - name: Register using username and password command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" + no_log: true when: - not registered - - rhsm_authentication != "key" + - rhsm_authentication == "password" - name: Auto-attach to Subscription Manager Pool command: "/usr/bin/subscription-manager attach --auto" when: - not registered - - rhsm_authentication != "key" - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - name: Attach to a specific pool @@ -83,7 +93,6 @@ - rhsm_pool is not none - rhsm_pool|trim != '' - not registered - - rhsm_authentication != "key" - name: Disable all repositories command: "/usr/bin/subscription-manager repos --disable=*" -- cgit v1.2.3 From 71f4817263a21b6e2062b35928ebfab373d26278 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Fri, 8 Apr 2016 11:02:57 -0500 Subject: Cosmetic changes to task names and move yum clean all to prereqs --- roles/subscription-manager/tasks/main.yml | 33 ++++++++++++++----------------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 6e51be7e4..adf3a8e85 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: Initialize rhsm_password variable if vars_prompt was used +- name: "Initialize rhsm_password variable if vars_prompt was used" set_fact: rhsm_password: "{{ hostvars.localhost.rhsm_password }}" when: @@ -7,12 +7,12 @@ - rhsm_password is not none - rhsm_password|trim != '' -- name: Initializing Subscription Manager authenticaiton method +- name: "Initializing Subscription Manager authenticaiton method" set_fact: rhsm_authentication: false # 'rhsm_activationkey' will take precedence 
even if 'rhsm_username' and 'rhsm_password' are also set -- name: Setting Subscription Manager Activation Key Fact +- name: "Setting Subscription Manager Activation Key Fact" set_fact: rhsm_authentication: "key" when: @@ -22,7 +22,7 @@ - not rhsm_authentication # If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password -- name: Setting Subscription Manager Username and Password Fact +- name: "Setting Subscription Manager Username and Password Fact" set_fact: rhsm_authentication: "password" when: @@ -34,28 +34,28 @@ - rhsm_password|trim != '' - not rhsm_authentication -- name: Initializing registration status +- name: "Initializing registration status" set_fact: registered: false -- name: Checking subscription status (a failure means it is not registered and will be) +- name: "Checking subscription status (a failure means it is not registered and will be)" command: "/usr/bin/subscription-manager status" ignore_errors: yes changed_when: no register: check_if_registered -- name: Set registration fact if system is already registered +- name: "Set registration fact if system is already registered" set_fact: registered: true when: check_if_registered.rc == 0 -- name: Cleaning any old subscriptions +- name: "Cleaning any old subscriptions" command: "/usr/bin/subscription-manager clean" when: - not registered - rhsm_authentication is defined -- name: Install Satellite certificate +- name: "Install Satellite certificate" command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" when: - not registered @@ -63,7 +63,7 @@ - rhsm_satellite is not none - rhsm_satellite|trim != '' -- name: Register to Satellite using activation key +- name: "Register to Satellite using activation key" command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}" when: - not registered @@ -73,20 +73,20 @@ - rhsm_satellite|trim != '' # This can apply to either Hosted or Satellite -- name: Register using username and password +- name: "Register using username and password" command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" no_log: true when: - not registered - rhsm_authentication == "password" -- name: Auto-attach to Subscription Manager Pool +- name: "Auto-attach to Subscription Manager Pool" command: "/usr/bin/subscription-manager attach --auto" when: - not registered - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' -- name: Attach to a specific pool +- name: "Attach to a specific pool" command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" when: - rhsm_pool is defined @@ -94,7 +94,7 @@ - rhsm_pool|trim != '' - not registered -- name: Disable all repositories +- name: "Disable all repositories" command: "/usr/bin/subscription-manager repos --disable=*" when: - not registered @@ -102,7 +102,7 @@ - rhsm_repos is not none - rhsm_repos|trim != '' -- name: Enable specified repositories +- name: "Enable specified repositories" command: "/usr/bin/subscription-manager repos --enable={{ item }}" with_items: rhsm_repos when: @@ -110,6 +110,3 @@ - rhsm_repos is defined - rhsm_repos is not none - rhsm_repos|trim != '' - -- name: Cleaning yum repositories - command: "yum clean all" -- cgit v1.2.3 From 39f973fcfd40fde18f5e92259d05e4ba6b30e22e Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Fri, 8 Apr 2016 18:44:23 -0500 Subject: Remove vars_prompt, add info to README to re-enable and for 
ansible-vault --- roles/subscription-manager/README.md | 91 +++++++++++++++++----- roles/subscription-manager/pre_tasks/pre_tasks.yml | 14 ++-- roles/subscription-manager/tasks/main.yml | 4 +- 3 files changed, 79 insertions(+), 30 deletions(-) diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md index a5dd1ac44..748de282c 100644 --- a/roles/subscription-manager/README.md +++ b/roles/subscription-manager/README.md @@ -20,7 +20,48 @@ Default: none Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. -NOTE: This variable is prompted for at the start of the playbook run. This is for security purposes so the password is not left in the command history. If specified on the command-line or set in a variable file it will be ignored and the value captured from the prompt will overwrite it instead. +NOTE: If this variable is specified on the command-line or set in a variable file it may leave your password exposed. For this reason you may perfer to use an Activation Key if using Satellite. For RHSM Hosted, your password must be specified. There are two ways to provide the password to the Ansible playbook without exposing it to prying eyes. + +1. The first method is to use a **vars_prompt** to collect the password up front one time for the playbook. Ansible will not display the password if the prompt is configured as **private** and the task will not display the password on the CLI. This is the a good method as it supports automating the task to every host with only one password entry. To enable **vars_prompt** add the following to the very top of your playbook after the **hosts** declaration and before any **pre_tasks** section: + + ``` + - hosts: localhost + # Add the following lines after a -hosts: declaration and before pre_tasks: + # Start of vars_prompt code block + vars_prompt: + - name: "rhsm_password" + prompt: "Subscription Manager password" + confirm: yes + private: yes + # End of vars_prompt code block + pre_tasks: + ``` + +2. A second method is to use an encrypted file via **ansible-vault**. This does does not require modifying any code as the previous method, but does require more work to create and encrypt the file. To accomplish this, first create a file containing at least the **rhsm_password** variable (it is also possible to specify additional variables to encrypt them all as well): + 1. Create a file to contain the variable such as **secrets.yml**: + + ``` + --- + rhsm_password: "my_secret_password" + # other variables can optionally be placed here as well + ``` + + 2. Encrypt the file with **ansible-vault**: + + ``` + $ ansible-vault encrypt secrets.yml + Vault password: + Confirm Vault password: + Encryption successful + ``` + + 3. When executing **ansible-playbook** specify **--ask-vault-pass** to be prompted for the decryption password, and also specify the location of the **secrets.yml** as such: + + ``` + $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" + ``` + + NOTE: Optionally the file containing the encrypted variables can be decrypted with **ansible-vault** and the **--ask-vault-pass** option omitted to prevent any password prompting (for automated runs) and the file can be encrypted after the run. This can be used if an external system such as Jenkins would handle the decryption/encryption outside of Ansible. 
Default: none @@ -53,21 +94,24 @@ rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server- Default: none ## Calling This Role -Calling this role requires adding a **vars_prompt**, **pre_tasks**, and **roles** section of a play +Calling this role is done at both **pre_tasks** and **roles** sections of a playbook and optionally a **vars_prompt**. ### vars_prompt -Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable +Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. It also cannot be shown conditionally. For this reason it is not included in this role by default. A better method may be using a file containing the password variable encrypted with **ansible-vault**. See the **rhsm_password** section for more details. -Add a prompt to capture **rhsm_password** +To Add a prompt to capture **rhsm_password**: ``` - hosts: localhost + # Add the following lines after a -hosts: declaration and before pre_tasks: + # Start of vars_prompt code block vars_prompt: - # Unfortunately vars_prompt can only be used at the play level before role tasks, so this is the only place it can go. See http://stackoverflow.com/questions/25466675/ansible-to-conditionally-prompt-for-a-variable - name: "rhsm_password" - prompt: "Subscription Manager password (enter blank if using rhsm_activationkey or to disable registration)" + prompt: "Subscription Manager password" confirm: yes private: yes + # End of vars_prompt code block + pre_tasks: ``` ### pre-tasks @@ -75,8 +119,8 @@ Add a prompt to capture **rhsm_password** A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: ``` - pre_tasks: - - include: roles/subscription-manager/pre_tasks/pre_tasks.yml +pre_tasks: +- include: roles/subscription-manager/pre_tasks/pre_tasks.yml ``` ### roles @@ -84,22 +128,29 @@ A number of variable checks are performed before any tasks to ensure the proper The bulk of the work is performed in the main.yml for this role. 
The pre-task play will set a variable which can be checked to contitionally include this role as such: ``` - roles: - - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } +roles: + - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } ``` -## Running the Playbook +## Running Playbooks with this Role -To register to RHSM Hosted with username and password: +- To register to RHSM Hosted or Satellite with a username and plain text password (NOTE: This may retain your password in your CLI history): -``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_username=vvaldez" -``` + ``` + $ ansible-playbook --extra-vars="rhsm_username=vvaldez rhsm_password=my_secret_password " + ``` -To register to a Satellite server with an activation key: +- To register to RHSM Hosted or Satellite with username and an encrypted file containing the password: -``` -ansible-playbook -i inventory/ose-provision ose-provision.yml -e "rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1" -``` + ``` + $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" + + ``` + +- To register to a Satellite server with an activation key: + + ``` + $ ansible-playbook --extra-vars="rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1 " -To ignore any Subscription Manager activities, simply do not set any parameters. When prompted for the password, hit **Enter** to set a blank password. + ``` +- To ignore any Subscription Manager activities, simply do not set any parameters. diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index 31441785e..8a4d8d06d 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,5 +1,5 @@ --- -- name: Set password fact +- name: "Set password fact" set_fact: rhsm_password: "{{ rhsm_password }}" no_log: true @@ -8,11 +8,11 @@ - rhsm_password is not none - rhsm_password|trim != '' -- name: Initialize Subscription Manager fact +- name: "Initialize Subscription Manager fact" set_fact: rhsm_register: true -- name: Determine if Subscription Manager should be used +- name: "Determine if Subscription Manager should be used" set_fact: rhsm_register: false when: @@ -23,7 +23,7 @@ - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' -- name: Validate Subscription Manager organization is set +- name: "Validate Subscription Manager organization is set" fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" when: - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' @@ -32,14 +32,14 @@ - rhsm_satellite|trim != '' - rhsm_register -- name: Validate Subscription Manager authentication is defined - fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set" +- name: "Validate Subscription Manager authentication is defined" + fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set. 
See the README.md for details on securely prompting for a password" when: - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - rhsm_register -- name: Validate activation key and Hosted are not requested together +- name: "Validate activation key and Hosted are not requested together" fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'" when: - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index adf3a8e85..bdb8ca7c4 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -3,9 +3,7 @@ set_fact: rhsm_password: "{{ hostvars.localhost.rhsm_password }}" when: - - rhsm_password is defined - - rhsm_password is not none - - rhsm_password|trim != '' + - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' - name: "Initializing Subscription Manager authenticaiton method" set_fact: -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 305140bfaeb6cd1bbe34279cbd6750d1136816d6 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Sat, 23 Apr 2016 12:50:25 -0500 Subject: Add org parameter to Satellite with user/pass --- roles/subscription-manager/tasks/main.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index bdb8ca7c4..9bc430665 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -77,6 +77,18 @@ when: - not registered - rhsm_authentication == "password" + - rhsm_org is not defined or rhsm_org is none or rhsm_org|trim == '' + +# This can apply to either Hosted or Satellite +- name: "Register using username, password and organization" + command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }} --org={{ rhsm_org }}" + no_log: true + when: + - not registered + - rhsm_authentication == "password" + - rhsm_org is defined + - rhsm_org is not none + - rhsm_org|trim != '' - name: "Auto-attach to Subscription Manager Pool" command: "/usr/bin/subscription-manager attach --auto" -- cgit v1.2.3 From 150b709052688c1cf1ab435c9775501154c7e35a Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Wed, 27 Apr 2016 17:14:42 -0500 Subject: Fix typo in task name --- roles/subscription-manager/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index bdb8ca7c4..f3bd8b656 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -5,7 +5,7 @@ when: - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' -- name: "Initializing Subscription Manager authenticaiton method" +- name: "Initializing Subscription Manager authentication method" set_fact: rhsm_authentication: false -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit 
v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From ca1b17aeeb8ed4f4db0a90a11bccd9ea009f9eac Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Fri, 13 May 2016 16:25:19 -0400 Subject: Changes by JayKayy for a full provision of OpenShift on OpenStack --- roles/common/pre_tasks/pre_tasks.yml | 2 +- roles/hostnames/tasks/main.yaml | 17 +++++++++++++++ roles/hostnames/templates/records.template.yaml | 28 +++++++++++++++++++++++++ roles/hostnames/test/inv | 12 +++++++++++ roles/hostnames/test/roles | 1 + roles/hostnames/test/test.retry | 3 +++ roles/hostnames/test/test.yaml | 21 +++++++++++++++++++ roles/hostnames/vars/main.yaml | 2 ++ roles/hostnames/vars/records.yaml | 28 +++++++++++++++++++++++++ 9 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 roles/hostnames/tasks/main.yaml create mode 100644 roles/hostnames/templates/records.template.yaml create mode 100644 roles/hostnames/test/inv create mode 120000 roles/hostnames/test/roles create mode 100644 roles/hostnames/test/test.retry create mode 100644 roles/hostnames/test/test.yaml create mode 100644 roles/hostnames/vars/main.yaml create mode 100644 roles/hostnames/vars/records.yaml diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index c573bff8c..9dd14c30c 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -1,4 +1,4 @@ --- - name: Generate Environment ID shell: echo "$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 8)" - register: env_random_id \ No newline at end of file + register: env_random_id diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml new file mode 100644 index 000000000..921cd664b --- /dev/null +++ b/roles/hostnames/tasks/main.yaml @@ -0,0 +1,17 @@ +--- + - name: Setting master(s) hostname + hostname: name="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'openshift_masters' in group_names" + + - name: Setting node(s) hostname + hostname: name="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'openshift_nodes' in group_names" + + - name: "Templating records" + become: false + remote_user: cloud-user + template: + src: "{{ role_path }}/templates/records.template.yaml" + dest: "/tmp/records.yaml" + force: yes + delegate_to: localhost diff --git a/roles/hostnames/templates/records.template.yaml b/roles/hostnames/templates/records.template.yaml new file mode 100644 index 000000000..a916fd2b3 --- /dev/null +++ b/roles/hostnames/templates/records.template.yaml @@ -0,0 +1,28 @@ +--- +dns_records_add: + - view: private + zone: {{ dns_domain }} + entries: +{% for mst in groups['openshift_masters'] %} + - type: A + hostname: {{ hostvars[mst]['ansible_hostname'] }} + ip: {{ hostvars[mst]['dns_private_ip'] }} +{% endfor %} +{% for node in groups['openshift_nodes'] %} + - type: A + hostname: {{ hostvars[node]['ansible_hostname'] }} + ip: {{ hostvars[node]['dns_private_ip'] }} +{% endfor %} + - view: public + zone: {{ dns_domain}} + entries: 
+{% for mst in groups['openshift_masters']%} + - type: A + hostname: {{ hostvars[mst]['ansible_hostname'] }} + ip: {{ hostvars[mst]['dns_public_ip'] }} +{% endfor %} +{% for node in groups['openshift_nodes'] %} + - type: A + hostname: {{ hostvars[node]['ansible_hostname'] }} + ip: {{ hostvars[node]['dns_public_ip'] }} +{% endfor %} diff --git a/roles/hostnames/test/inv b/roles/hostnames/test/inv new file mode 100644 index 000000000..ffbe6e03d --- /dev/null +++ b/roles/hostnames/test/inv @@ -0,0 +1,12 @@ +[all:vars] +dns_domain=example.com + +[openshift_masters] +192.168.124.41 dns_private_ip=1.1.1.41 dns_public_ip=192.168.124.41 +192.168.124.117 dns_private_ip=1.1.1.117 dns_public_ip=192.168.124.117 + +[openshift_nodes] +192.168.124.40 dns_private_ip=1.1.1.40 dns_public_ip=192.168.124.40 + +#[dns] +#192.168.124.117 dns_private_ip=1.1.1.117 diff --git a/roles/hostnames/test/roles b/roles/hostnames/test/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/roles/hostnames/test/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/roles/hostnames/test/test.retry b/roles/hostnames/test/test.retry new file mode 100644 index 000000000..63fc08e4c --- /dev/null +++ b/roles/hostnames/test/test.retry @@ -0,0 +1,3 @@ +192.168.124.117 +192.168.124.40 +192.168.124.41 diff --git a/roles/hostnames/test/test.yaml b/roles/hostnames/test/test.yaml new file mode 100644 index 000000000..34bf37942 --- /dev/null +++ b/roles/hostnames/test/test.yaml @@ -0,0 +1,21 @@ +--- +- hosts: all + roles: + - role: hostnames + +# - debug: +# +# - hosts: dns +# roles: +# - role: dns-server +# named_config_views: +# - name: private +# acl_entry: +# - 192.168.124.40/32 +# - 192.168.124.40/32 +# zone: +# - dns_domain: example.com +# - name: public +# zone: +# - dns_domain: example.com +# - role: dns diff --git a/roles/hostnames/vars/main.yaml b/roles/hostnames/vars/main.yaml new file mode 100644 index 000000000..3eecb8dc4 --- /dev/null +++ b/roles/hostnames/vars/main.yaml @@ -0,0 +1,2 @@ +--- +counter: 1 diff --git a/roles/hostnames/vars/records.yaml b/roles/hostnames/vars/records.yaml new file mode 100644 index 000000000..3bf12ae2b --- /dev/null +++ b/roles/hostnames/vars/records.yaml @@ -0,0 +1,28 @@ +--- + - name: "Building Records" + set_fact: + dns_records_add: + - view: private + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 172.16.15.94 + - type: A + hostname: node1.example.com + ip: 172.16.15.86 + - type: A + hostname: node2.example.com + ip: 172.16.15.87 + - view: public + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 10.3.10.116 + - type: A + hostname: node1.example.com + ip: 10.3.11.46 + - type: A + hostname: node2.example.com + ip: 10.3.12.6 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From c8f84c0aebe1fe9c00498921c5f83022a2e873c3 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Fri, 3 Jun 2016 14:01:22 -0400 Subject: Changes to allow runs from inside a container. 
Also allows for running upstream openshift-ansible installer --- roles/hostnames/tasks/main.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index 921cd664b..c34d07915 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -15,3 +15,6 @@ dest: "/tmp/records.yaml" force: yes delegate_to: localhost + + - name: "Updating hostname facts" + setup: filter=ansible_hostname -- cgit v1.2.3 From e4c6ba27a5fe784143831e02e5181794c1b953b2 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Fri, 3 Jun 2016 18:01:05 -0400 Subject: Reverting previous commit and making template adjustments --- roles/hostnames/tasks/main.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index c34d07915..700845e47 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -3,10 +3,18 @@ hostname: name="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" when: "'openshift_masters' in group_names" + - name: Setting facts for masters + set_fact: ansible_hostname="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'openshift_masters' in group_names" + - name: Setting node(s) hostname hostname: name="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" when: "'openshift_nodes' in group_names" + - name: Setting facts for nodes + set_fact: ansible_hostname="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'openshift_nodes' in group_names" + - name: "Templating records" become: false remote_user: cloud-user -- cgit v1.2.3 From d827e1796c6a3705007365cb58aa6b36a92d3b6e Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Fri, 3 Jun 2016 19:10:27 -0400 Subject: Subscription manager role should accomodate orgs with spaces --- roles/subscription-manager/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index f3bd8b656..c73204a29 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -62,7 +62,7 @@ - rhsm_satellite|trim != '' - name: "Register to Satellite using activation key" - command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org={{ rhsm_org }}" + command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org='{{ rhsm_org }}'" when: - not registered - rhsm_authentication == 'key' -- cgit v1.2.3 -- cgit v1.2.3 From e2181a706679666a6fff2e2aaca648ed982060bd Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Wed, 8 Jun 2016 14:58:36 -0400 Subject: Channging hard coded host groups to match openshift-ansible expected host groups. Importing byo playbook now instead of nested ansible run. Need to refactor how we generate hostnames to make it fit this. 
--- roles/hostnames/tasks/main.yaml | 17 ++++++++--------- roles/hostnames/templates/records.template.yaml | 8 ++++---- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index 700845e47..bf2fafb97 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -1,23 +1,22 @@ --- - name: Setting master(s) hostname - hostname: name="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_masters' in group_names" + hostname: name="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'masters' in group_names" - name: Setting facts for masters - set_fact: ansible_hostname="{% for thishost in groups['openshift_masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_masters' in group_names" + set_fact: ansible_hostname="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'masters' in group_names" - name: Setting node(s) hostname - hostname: name="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_nodes' in group_names" + hostname: name="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'nodes' in group_names" - name: Setting facts for nodes - set_fact: ansible_hostname="{% for thishost in groups['openshift_nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'openshift_nodes' in group_names" + set_fact: ansible_hostname="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" + when: "'nodes' in group_names" - name: "Templating records" become: false - remote_user: cloud-user template: src: "{{ role_path }}/templates/records.template.yaml" dest: "/tmp/records.yaml" diff --git a/roles/hostnames/templates/records.template.yaml b/roles/hostnames/templates/records.template.yaml index a916fd2b3..2f2420464 100644 --- a/roles/hostnames/templates/records.template.yaml +++ b/roles/hostnames/templates/records.template.yaml @@ -3,12 +3,12 @@ dns_records_add: - view: private zone: {{ dns_domain }} entries: -{% for mst in groups['openshift_masters'] %} +{% for mst in groups['masters'] %} - type: A hostname: {{ hostvars[mst]['ansible_hostname'] }} ip: {{ hostvars[mst]['dns_private_ip'] }} {% endfor %} -{% for node in groups['openshift_nodes'] %} +{% for node in groups['nodes'] %} - type: A hostname: {{ hostvars[node]['ansible_hostname'] }} ip: {{ hostvars[node]['dns_private_ip'] }} @@ -16,12 +16,12 @@ dns_records_add: - view: public zone: {{ dns_domain}} entries: -{% for mst in groups['openshift_masters']%} +{% for mst in groups['masters']%} - type: A hostname: {{ hostvars[mst]['ansible_hostname'] }} ip: {{ hostvars[mst]['dns_public_ip'] }} {% endfor %} -{% for node in groups['openshift_nodes'] %} +{% for node in 
groups['nodes'] %} - type: A hostname: {{ hostvars[node]['ansible_hostname'] }} ip: {{ hostvars[node]['dns_public_ip'] }} -- cgit v1.2.3 From 4d6eb644d78f4b972154ade3d12c23b28dbe19e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Thu, 9 Jun 2016 11:34:07 -0400 Subject: Updated to run as root rather than cloud-user, for now... --- roles/common/pre_tasks/pre_tasks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 9dd14c30c..ed57a2993 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -1,4 +1,4 @@ --- - name: Generate Environment ID - shell: echo "$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 8)" + shell: echo "$(date +%s)" register: env_random_id -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 3866232daed8ce1a48aa2db6f2f6c541e90756ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Fri, 17 Jun 2016 14:48:37 -0400 Subject: Cleande up hostname role to make it more generic --- roles/hostnames/tasks/main.yaml | 43 ++++++++++++------------- roles/hostnames/templates/records.template.yaml | 28 ---------------- 2 files changed, 21 insertions(+), 50 deletions(-) delete mode 100644 roles/hostnames/templates/records.template.yaml diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index bf2fafb97..bb45445f5 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -1,27 +1,26 @@ --- - - name: Setting master(s) hostname - hostname: name="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'masters' in group_names" +- name: Setting Hostname Fact + set_fact: + new_hostname: "{{ custom_hostname | default(inventory_hostname) }}" - - name: Setting facts for masters - set_fact: ansible_hostname="{% for thishost in groups['masters'] %}{% if inventory_hostname == thishost %}master{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'masters' in group_names" +- name: Setting FQDN Fact + set_fact: + new_fqdn: "{{ new_hostname }}.{{ dns_domain }}" - - name: Setting node(s) hostname - hostname: name="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}.{{ dns_domain }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'nodes' in group_names" +- name: Setting hostname and DNS domain + hostname: name="{{ new_fqdn }}" - - name: Setting facts for nodes - set_fact: ansible_hostname="{% for thishost in groups['nodes'] %}{% if inventory_hostname == thishost %}node{{ counter }}{% endif %}{% set counter = counter + 1 %}{% endfor %}" - when: "'nodes' in group_names" +- name: Check for cloud.cfg + stat: path=/etc/cloud/cloud.cfg + register: cloud_cfg - - name: "Templating records" - become: false - template: - src: "{{ role_path }}/templates/records.template.yaml" - dest: "/tmp/records.yaml" - force: yes - delegate_to: localhost - - - name: "Updating hostname facts" - setup: filter=ansible_hostname +- name: Prevent cloud-init updates of hostname/fqdn (if applicable) + lineinfile: + dest: /etc/cloud/cloud.cfg + state: present + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ - set_hostname', line: '# - set_hostname' } + - { regexp: '^ - update_hostname', line: '# - 
update_hostname' } + when: cloud_cfg.stat.exists == True diff --git a/roles/hostnames/templates/records.template.yaml b/roles/hostnames/templates/records.template.yaml deleted file mode 100644 index 2f2420464..000000000 --- a/roles/hostnames/templates/records.template.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -dns_records_add: - - view: private - zone: {{ dns_domain }} - entries: -{% for mst in groups['masters'] %} - - type: A - hostname: {{ hostvars[mst]['ansible_hostname'] }} - ip: {{ hostvars[mst]['dns_private_ip'] }} -{% endfor %} -{% for node in groups['nodes'] %} - - type: A - hostname: {{ hostvars[node]['ansible_hostname'] }} - ip: {{ hostvars[node]['dns_private_ip'] }} -{% endfor %} - - view: public - zone: {{ dns_domain}} - entries: -{% for mst in groups['masters']%} - - type: A - hostname: {{ hostvars[mst]['ansible_hostname'] }} - ip: {{ hostvars[mst]['dns_public_ip'] }} -{% endfor %} -{% for node in groups['nodes'] %} - - type: A - hostname: {{ hostvars[node]['ansible_hostname'] }} - ip: {{ hostvars[node]['dns_public_ip'] }} -{% endfor %} -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From fbf2f35080f666f68994e30174a590b8308b59f3 Mon Sep 17 00:00:00 2001 From: Vinny Valdez Date: Fri, 15 Jul 2016 14:05:13 -0500 Subject: Fixes Issue #163 if rhsm_password is not defined --- roles/subscription-manager/pre_tasks/pre_tasks.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index 8a4d8d06d..b21356cf2 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -1,12 +1,8 @@ --- - name: "Set password fact" set_fact: - rhsm_password: "{{ rhsm_password }}" + rhsm_password: "{{ rhsm_password | default(None) }}" no_log: true - when: - - rhsm_password is defined - - rhsm_password is not none - - rhsm_password|trim != '' - name: "Initialize Subscription Manager fact" set_fact: -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From c757fd690d24865ef3b5b9a1b536120299b39a6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Sun, 21 Aug 2016 02:12:53 -0400 Subject: Updated env_id to be a sub-domain + make the logic a bit more flexible --- roles/common/pre_tasks/pre_tasks.yml | 21 +++++++++++++++++++-- roles/hostnames/tasks/main.yaml | 4 ++-- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index ed57a2993..1ba1ea55d 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -1,4 +1,21 @@ --- - name: Generate Environment ID - shell: echo "$(date +%s)" - register: env_random_id + set_fact: + env_random_id: "{{ ansible_date_time.epoch }}" + run_once: true + delegate_to: localhost + +- name: Set default Environment ID + set_fact: + default_env_id: "casl-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" + delegate_to: localhost + +- name: Setting Common Facts 
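For context on the hostnames role's lineinfile tasks above: the entries being commented out live in cloud-init's module list in /etc/cloud/cloud.cfg. On a typical RHEL cloud image the relevant excerpt ends up looking roughly like this after the role has run (a sketch of a common image default, not a file tracked in this repository):

```
# /etc/cloud/cloud.cfg (excerpt, sketch of the post-run state)
cloud_init_modules:
  - migrator
  - bootcmd
  - write-files
# - set_hostname        # commented out so cloud-init stops resetting the hostname
# - update_hostname     # commented out for the same reason
  - update_etc_hosts
  - rsyslog
  - users-groups
  - ssh
```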
+ set_fact: + env_id: "{{ env_id | default(default_env_id) }}" + delegate_to: localhost + +- name: Updating DNS domain to include env_id (if not empty) + set_fact: + full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' + dns_domain) }}" + delegate_to: localhost diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index bb45445f5..bf142d653 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -1,11 +1,11 @@ --- - name: Setting Hostname Fact set_fact: - new_hostname: "{{ custom_hostname | default(inventory_hostname) }}" + new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" - name: Setting FQDN Fact set_fact: - new_fqdn: "{{ new_hostname }}.{{ dns_domain }}" + new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" - name: Setting hostname and DNS domain hostname: name="{{ new_fqdn }}" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From fbda334b6797eb0109cd9c13afb99a47e3916b36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Tue, 15 Nov 2016 22:26:58 -0500 Subject: Fixing ansible impl to work with OSP9 and ansible 2.2 --- roles/subscription-manager/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 0b3aa351f..2dd14b48e 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -114,7 +114,7 @@ - name: "Enable specified repositories" command: "/usr/bin/subscription-manager repos --enable={{ item }}" - with_items: rhsm_repos + with_items: "{{ rhsm_repos }}" when: - not registered - rhsm_repos is defined -- cgit 
v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 11b48fe4e237950f9d9e9a0e66d8b15f48be1ea0 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Wed, 21 Dec 2016 10:37:40 -0500 Subject: Openstack heat (#2) * Adding a role to invoke openstack heat * Adding readme * Pulling parameters out to inventory file * start of end-to-end playbook * More enhancements and refactoring to make dynamic inventory the driver for an openshift install * Switching to variable substituted path to config.yaml playbook * Changes to allow defining of number of nodes/infranodes. * Added labels to inventory * Start of end-to-end functionality * Enhancements to support openstack heat provisioning * Updating inventory sample to remove some deprecation warnings * Working towards making the secure-registry role 'become' aware * Fixing node labels and removing secure-registry as it's no longer needed * No longer need insecure registry line, as installer will secure our registry * Adjusted dynamic inventory to filter by clusterid * Minor updates to dynamic inventory bug * Adding a refactored sample inventory directory * Refactoring playbooks for better directory structure, and to narrow down host groups * Adding volume mounts to heat template * Moving dns playbooks back to original location * Fixing incorrect file path * Cleaning up inventory samples * One more hostname to clean up * Changing var name * changed openshift-provision to openshift-prep * Adjusting current provision script to avoid breakage by new openstack-heat code --- roles/common/pre_tasks/pre_tasks.yml | 5 + roles/openshift-prep/tasks/main.yml | 4 + roles/openshift-prep/tasks/prerequisites.yml | 36 ++ roles/openstack-stack/README.md | 9 + roles/openstack-stack/files/heat_stack.yaml | 684 +++++++++++++++++++++ roles/openstack-stack/files/heat_stack_server.yaml | 156 +++++ roles/openstack-stack/files/user-data | 13 + roles/openstack-stack/tasks/main.yml | 31 + roles/openstack-stack/test/roles | 1 + roles/openstack-stack/test/stack-create-test.yml | 17 + 10 files changed, 956 insertions(+) create mode 100644 roles/openshift-prep/tasks/main.yml create mode 100644 roles/openshift-prep/tasks/prerequisites.yml create mode 100644 roles/openstack-stack/README.md create mode 100644 roles/openstack-stack/files/heat_stack.yaml create mode 100644 roles/openstack-stack/files/heat_stack_server.yaml create mode 100644 roles/openstack-stack/files/user-data create mode 
100644 roles/openstack-stack/tasks/main.yml create mode 120000 roles/openstack-stack/test/roles create mode 100644 roles/openstack-stack/test/stack-create-test.yml diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 1ba1ea55d..71a989b30 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -15,6 +15,11 @@ env_id: "{{ env_id | default(default_env_id) }}" delegate_to: localhost +- name: Set Dynamic Inventory Filters + shell: > + export OS_INV_FILTER_KEY=clusterid && OS_INV_FILTER_VALUE={{ env_id }} + delegate_to: localhost + - name: Updating DNS domain to include env_id (if not empty) set_fact: full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' + dns_domain) }}" diff --git a/roles/openshift-prep/tasks/main.yml b/roles/openshift-prep/tasks/main.yml new file mode 100644 index 000000000..5e484e75f --- /dev/null +++ b/roles/openshift-prep/tasks/main.yml @@ -0,0 +1,4 @@ +--- +# Starting Point for OpenShift Installation and Configuration +- include: prerequisites.yml + tags: [prerequisites] diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml new file mode 100644 index 000000000..1286905f4 --- /dev/null +++ b/roles/openshift-prep/tasks/prerequisites.yml @@ -0,0 +1,36 @@ +--- +- name: "Cleaning yum repositories" + command: "yum clean all" + +- name: "Install required packages" + yum: + name: "{{ item }}" + state: latest + with_items: + - wget + - git + - net-tools + - bind-utils + - bridge-utils + - bash-completion + - atomic-openshift-utils + - vim-enhanced + +- name: "Update all packages (this can take a very long time)" + yum: + name: "*" + state: latest + +- name: "Verify hostname" + shell: hostnamectl status | awk "/Static hostname/"'{ print $3 }' + register: hostname_fqdn + +- name: "Set hostname if required" + hostname: + name: "{{ ansible_fqdn }}" + when: hostname_fqdn.stdout != ansible_fqdn + +- name: "Verify SELinux is enforcing" + fail: + msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" + when: ansible_selinux.config_mode != "enforcing" diff --git a/roles/openstack-stack/README.md b/roles/openstack-stack/README.md new file mode 100644 index 000000000..509c9de6c --- /dev/null +++ b/roles/openstack-stack/README.md @@ -0,0 +1,9 @@ +# Role openstack-stack + +Role for spinning up instances using OpenStack Heat. 
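The openshift-prep prerequisite tasks above abort the run when SELinux is not enforcing. The same check can be expressed as a standalone play with the assert module; the sketch below is illustrative only, relies on the facts gathered by setup, and is not part of the openshift-prep role itself:

```
---
# Sketch: standalone version of the SELinux prerequisite check.
- hosts: all
  become: true
  tasks:
    - name: Verify SELinux is enabled and enforcing
      assert:
        that:
          - ansible_selinux.status == "enabled"
          - ansible_selinux.config_mode == "enforcing"
        msg: "SELinux must be enforcing before installing OpenShift"
```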
+ +## To Test + +``` +ansible-playbook casl-ansible/roles/openstack-stack/test/stack-create-test.yml +``` diff --git a/roles/openstack-stack/files/heat_stack.yaml b/roles/openstack-stack/files/heat_stack.yaml new file mode 100644 index 000000000..058f7a7ad --- /dev/null +++ b/roles/openstack-stack/files/heat_stack.yaml @@ -0,0 +1,684 @@ +heat_template_version: 2014-10-16 + +description: OpenShift cluster + +parameters: + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + subnet_24_prefix: + type: string + label: subnet /24 prefix + description: /24 subnet prefix of the network of the cluster (dot separated number triplet) + + dns_nameservers: + type: comma_delimited_list + label: DNS nameservers list + description: List of DNS nameservers + + external_net: + type: string + label: External network + description: Name of the external network + default: external + + ssh_public_key: + type: string + label: SSH public key + description: SSH public key + hidden: true + + ssh_incoming: + type: string + label: Source of ssh connections + description: Source of legitimate ssh connections + default: 0.0.0.0/0 + + node_port_incoming: + type: string + label: Source of node port connections + description: Authorized sources targetting node ports + default: 0.0.0.0/0 + + num_etcd: + type: number + label: Number of etcd nodes + description: Number of etcd nodes + + num_masters: + type: number + label: Number of masters + description: Number of masters + + num_nodes: + type: number + label: Number of compute nodes + description: Number of compute nodes + + num_infra: + type: number + label: Number of infrastructure nodes + description: Number of infrastructure nodes + + num_dns: + type: number + label: Number of dns servers + description: Number of dns servers + + etcd_image: + type: string + label: Etcd image + description: Name of the image for the etcd servers + + master_image: + type: string + label: Master image + description: Name of the image for the master servers + + node_image: + type: string + label: Node image + description: Name of the image for the compute node servers + + infra_image: + type: string + label: Infra image + description: Name of the image for the infra node servers + + dns_image: + type: string + label: DNS image + description: Name of the image for the DNS server + + etcd_flavor: + type: string + label: Etcd flavor + description: Flavor of the etcd servers + + master_flavor: + type: string + label: Master flavor + description: Flavor of the master servers + + node_flavor: + type: string + label: Node flavor + description: Flavor of the compute node servers + + infra_flavor: + type: string + label: Infra flavor + description: Flavor of the infra node servers + + dns_flavor: + type: string + label: DNS flavor + description: Flavor of the DNS server + + master_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + app_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + infra_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. 
+ + dns_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + etcd_volume_size: + type: number + description: Size of the volume to be created. + default: 5 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + +outputs: + + etcd_names: + description: Name of the etcds + value: { get_attr: [ etcd, name ] } + + etcd_ips: + description: IPs of the etcds + value: { get_attr: [ etcd, private_ip ] } + + etcd_floating_ips: + description: Floating IPs of the etcds + value: { get_attr: [ etcd, floating_ip ] } + + master_names: + description: Name of the masters + value: { get_attr: [ masters, name ] } + + master_ips: + description: IPs of the masters + value: { get_attr: [ masters, private_ip ] } + + master_floating_ips: + description: Floating IPs of the masters + value: { get_attr: [ masters, floating_ip ] } + + node_names: + description: Name of the nodes + value: { get_attr: [ compute_nodes, name ] } + + node_ips: + description: IPs of the nodes + value: { get_attr: [ compute_nodes, private_ip ] } + + node_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ compute_nodes, floating_ip ] } + + infra_names: + description: Name of the nodes + value: { get_attr: [ infra_nodes, name ] } + + infra_ips: + description: IPs of the nodes + value: { get_attr: [ infra_nodes, private_ip ] } + + infra_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ infra_nodes, floating_ip ] } + + dns_name: + description: Name of the DNS + value: + get_attr: + - dns + - name + + dns_floating_ip: + description: Floating IP of the DNS + value: + get_attr: + - dns + - addresses + - str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + - 1 + - addr + +resources: + + net: + type: OS::Neutron::Net + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + + subnet: + type: OS::Neutron::Subnet + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-subnet + params: + cluster_id: { get_param: cluster_id } + network: { get_resource: net } + cidr: + str_replace: + template: subnet_24_prefix.0/24 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + allocation_pools: + - start: + str_replace: + template: subnet_24_prefix.3 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + end: + str_replace: + template: subnet_24_prefix.254 + params: + subnet_24_prefix: { get_param: subnet_24_prefix } + dns_nameservers: + - 10.9.48.31 +# - { get_param: dns_nameservers } +# repeat: +# for_each: +# <%nameserver%>: { get_param: dns_nameservers } +# template: <%nameserver%> + + router: + type: OS::Neutron::Router + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-router + params: + cluster_id: { get_param: cluster_id } + external_gateway_info: + network: { get_param: external_net } + + interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: subnet } + +# keypair: +# type: OS::Nova::KeyPair +# properties: +# name: +# str_replace: +# template: openshift-ansible-cluster_id-keypair +# params: +# cluster_id: { get_param: cluster_id } +# public_key: { get_param: ssh_public_key } + + master-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + 
str_replace: + template: openshift-ansible-cluster_id-master-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id OpenShift cluster master + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8443 + - direction: ingress + protocol: tcp + port_range_min: 8444 + port_range_max: 8444 + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5404 + - direction: ingress + protocol: udp + port_range_min: 5405 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + + etcd-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-etcd-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id etcd cluster + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + remote_mode: remote_group_id + + node-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-node-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id OpenShift cluster nodes + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: { get_param: node_port_incoming } + + infra-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-infra-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + 
str_replace: + template: Security group for cluster_id OpenShift infrastructure cluster nodes + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 + + dns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-dns-secgrp + params: + cluster_id: { get_param: cluster_id } + description: + str_replace: + template: Security group for cluster_id cluster DNS + params: + cluster_id: { get_param: cluster_id } + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: { get_param: ssh_incoming } + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: { get_param: node_port_incoming } + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: { get_param: node_port_incoming } + + etcd: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_etcd } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: etcd + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: etcd + image: { get_param: etcd_image } + flavor: { get_param: etcd_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: etcd-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: etcd_volume_size } + depends_on: + - interface + + masters: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_masters } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: master + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: master + image: { get_param: master_image } + flavor: { get_param: master_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: master-secgrp } + - { get_resource: node-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: master_volume_size } + depends_on: + - interface + + compute_nodes: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_nodes } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: subtype-k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: node + subtype: app + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: node + subtype: app + image: { get_param: node_image } + flavor: { get_param: node_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + 
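Each of the server groups in this template follows the same OS::Heat::ResourceGroup pattern; reduced to its essentials it looks like the sketch below. Heat substitutes %index% with 0..count-1 for each member, which is what produces names such as app-node-0.<cluster_id>.<cluster_env>:

```
# Sketch: the ResourceGroup pattern shared by etcd, masters, nodes and infra.
compute_nodes:
  type: OS::Heat::ResourceGroup
  properties:
    count: { get_param: num_nodes }
    resource_def:
      type: heat_stack_server.yaml          # nested per-server template
      properties:
        name:
          str_replace:
            # %index% becomes 0, 1, 2, ... for each server in the group
            template: subtype-k8s_type-%index%.cluster_id.cluster_env
            params:
              cluster_id: { get_param: cluster_id }
              cluster_env: { get_param: cluster_env }
              k8s_type: node
              subtype: app
```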
secgrp: + - { get_resource: node-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: app_volume_size } + depends_on: + - interface + + infra_nodes: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_infra } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: subtypek8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: node + subtype: infra + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: node + subtype: infra + image: { get_param: infra_image } + flavor: { get_param: infra_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: infra-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: infra_volume_size } + depends_on: + - interface + + dns: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: num_dns } + resource_def: + type: heat_stack_server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id.cluster_env + params: + cluster_id: { get_param: cluster_id } + k8s_type: dns + cluster_env: { get_param: cluster_env } + cluster_env: { get_param: cluster_env } + cluster_id: { get_param: cluster_id } + type: dns + image: { get_param: dns_image } + flavor: { get_param: dns_flavor } + key_name: { get_param: ssh_public_key } + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: dns-secgrp } + floating_network: { get_param: external_net } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: { get_param: cluster_id } + volume_size: { get_param: dns_volume_size } + depends_on: + - interface + diff --git a/roles/openstack-stack/files/heat_stack_server.yaml b/roles/openstack-stack/files/heat_stack_server.yaml new file mode 100644 index 000000000..978da4f0b --- /dev/null +++ b/roles/openstack-stack/files/heat_stack_server.yaml @@ -0,0 +1,156 @@ +heat_template_version: 2014-10-16 + +description: OpenShift cluster server + +parameters: + + name: + type: string + label: Name + description: Name + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + + subnet: + type: string + label: Subnet ID + description: Subnet resource + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group 
resources + + floating_network: + type: string + label: Floating network + description: Network to allocate floating IP from + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. + default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 0 + - addr + + floating_ip: + description: Floating IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 1 + - addr + +resources: + + server: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: { get_resource: port } + user_data: { get_file: user-data } + user_data_format: RAW + metadata: + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } + fixed_ips: + - subnet: { get_param: subnet } + security_groups: { get_param: secgrp } + + floating-ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: floating_network } + port_id: { get_resource: port } + + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server } + mountpoint: /dev/sdb diff --git a/roles/openstack-stack/files/user-data b/roles/openstack-stack/files/user-data new file mode 100644 index 000000000..eb65f7cec --- /dev/null +++ b/roles/openstack-stack/files/user-data @@ -0,0 +1,13 @@ +#cloud-config +disable_root: true + +system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + +write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + content: | + Defaults:openshift !requiretty diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml new file mode 100644 index 000000000..c953cb603 --- /dev/null +++ b/roles/openstack-stack/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: create stack + ignore_errors: False + register: stack_create + os_stack: + name: "{{ stack_name }}" + state: present + template: 'roles/openstack-stack/files/heat_stack.yaml' + wait: yes + parameters: + cluster_env: "{{ dns_domain }}" + cluster_id: "{{ stack_name }}" + subnet_24_prefix: "{{ subnet_prefix }}" + dns_nameservers: "{{ dns_nameservers }}" + external_net: "{{ external_network }}" + ssh_public_key: "{{ ssh_public_key }}" + num_etcd: "{{ num_etcd }}" + num_masters: "{{ num_masters }}" + num_nodes: "{{ num_nodes }}" + num_infra: "{{ num_infra }}" + num_dns: "{{ num_dns }}" + etcd_image: "{{ openstack_image }}" + master_image: "{{ openstack_image }}" + node_image: "{{ openstack_image }}" + infra_image: "{{ openstack_image }}" + dns_image: "{{ openstack_image }}" + etcd_flavor: "{{ etcd_flavor }}" + master_flavor: "{{ master_flavor }}" + node_flavor: "{{ node_flavor }}" + infra_flavor: "{{ infra_flavor }}" + 
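Because the create-stack task registers its result as stack_create, the Heat outputs defined earlier (master_floating_ips, node_floating_ips, and so on) can be read back after the role runs. The exact return structure of os_stack depends on the Ansible and shade versions in use, so treat the following as a sketch built on that assumption:

```
# Sketch: assumes the registered os_stack result exposes the Heat outputs
# as a list of {output_key, output_value} entries under stack.outputs.
- name: Show the master floating IPs reported by the stack
  debug:
    msg: "{{ item.output_value }}"
  with_items: "{{ stack_create.stack.outputs }}"
  when: item.output_key == 'master_floating_ips'
```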
dns_flavor: "{{ dns_flavor }}" diff --git a/roles/openstack-stack/test/roles b/roles/openstack-stack/test/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/roles/openstack-stack/test/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml new file mode 100644 index 000000000..94e312ee3 --- /dev/null +++ b/roles/openstack-stack/test/stack-create-test.yml @@ -0,0 +1,17 @@ +--- +- hosts: localhost + roles: + - role: openstack-stack + stack_name: test-stack + dns_domain: "{{ openstack_dns_domain }}" + dns_nameservers: "{{ openstack_nameservers }}" + subnet_prefix: "{{ openstack_subnet_prefix }}" + ssh_public_key: "{{ openstack_ssh_public_key }}" + openstack_image: "{{ openstack_default_image_name }}" + etcd_flavor: "{{ openstack_default_flavor }}" + master_flavor: "{{ openstack_default_flavor }}" + node_flavor: "{{ openstack_default_flavor }}" + infra_flavor: "{{ openstack_default_flavor }}" + dns_flavor: "{{ openstack_default_flavor }}" + external_network: "{{ openstack_external_network_name }}" + -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From f0ca54ac5c4408284105fe877d81e8afbfbc2991 Mon Sep 17 00:00:00 2001 From: Ryan Cook Date: Fri, 13 Jan 2017 15:47:50 -0500 Subject: Making providers common (#126) * Making providers common * moving directory locations * using links and removal of vars file callout * rename of file * went block crazy * cleanup * add to remove * missing Pyyaml package in README * let docker actually setup docker storage and start the service * name change * Fix for vmware. 
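The stack-create-test.yml playbook above expects a handful of openstack_* variables to be supplied by the caller. A sketch of what those could look like in a group_vars file; every value is a placeholder for a hypothetical tenant:

```
# group_vars/all.yml (sketch; every value is a placeholder)
openstack_dns_domain: example.com
openstack_nameservers:
  - 8.8.8.8
openstack_subnet_prefix: 172.18.10
openstack_ssh_public_key: ocp-keypair
openstack_default_image_name: rhel-7-server
openstack_default_flavor: m1.medium
openstack_external_network_name: external
```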
Will variablize in the future * catchup to test common providers against master * should only be schedulable nodes --- roles/docker-storage-setup/files/docker-storage-setup | 4 ++++ roles/docker-storage-setup/tasks/main.yaml | 8 ++++++++ 2 files changed, 12 insertions(+) create mode 100644 roles/docker-storage-setup/files/docker-storage-setup create mode 100644 roles/docker-storage-setup/tasks/main.yaml diff --git a/roles/docker-storage-setup/files/docker-storage-setup b/roles/docker-storage-setup/files/docker-storage-setup new file mode 100644 index 000000000..5e9d494a1 --- /dev/null +++ b/roles/docker-storage-setup/files/docker-storage-setup @@ -0,0 +1,4 @@ +DEVS=/dev/sdb +VG=docker-vol +DATA_SIZE=95%VG +EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize=3G" diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml new file mode 100644 index 000000000..32f79fff9 --- /dev/null +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -0,0 +1,8 @@ +--- +- name: create the docker-storage-setup config file + copy: + src: docker-storage-setup + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 986d04922446da75879ce5a9064bd0db1477ac7f Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 26 Jan 2017 17:37:06 -0500 Subject: update for yamllint errors --- roles/docker-storage-setup/tasks/main.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 32f79fff9..17b13f27f 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -1,8 +1,8 @@ --- - name: create the docker-storage-setup config file copy: - src: docker-storage-setup - dest: /etc/sysconfig/docker-storage-setup - owner: root - group: root - mode: 0644 + src: docker-storage-setup + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 3bf8df1a873785a09bf3c1827bfb5097955c5e44 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Tue, 7 Feb 2017 01:12:58 -0500 Subject: Fixing two significant bugs in the HEAT deployment (#13) --- roles/openstack-stack/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index c953cb603..efee08c0e 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -29,3 +29,6 @@ node_flavor: "{{ node_flavor }}" infra_flavor: "{{ infra_flavor }}" dns_flavor: "{{ dns_flavor }}" + master_volume_size: "{{ master_volume_size }}" + app_volume_size: "{{ 
app_volume_size }}" + infra_volume_size: "{{ infra_volume_size }}" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From fdac6976d4b48c11b8de253ef8afa34af0da8cdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Mon, 20 Feb 2017 12:56:21 -0500 Subject: Ensure DNS configuration has wildcards set for infra nodes (#24) * Ensure DNS configuration has wildcards set for infra nodes * Updated to include all cluster hosts for DNS entries --- roles/common/pre_tasks/pre_tasks.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 71a989b30..06a56605d 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -24,3 +24,13 @@ set_fact: full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' + dns_domain) }}" delegate_to: localhost + +- name: Set the APP domain for OpenShift use + set_fact: + openshift_app_domain: "{{ openshift_app_domain | default('apps') }}" + delegate_to: localhost + +- name: Set the default app domain for routing purposes + set_fact: + openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" + delegate_to: localhost -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From c90d5323afc575246df2f50e9125069f3c12e81e Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Tue, 25 Apr 2017 23:17:38 -0400 Subject: Stack refactor (#38) * Refactored openstack-stack role to: - Convert static heat template files to ansible templates - Include native ansible groups via openstack metadata. This removes the need for a playbook to map host groups - Some code cleanup * Deleting commentd out code and irrelevant plays * Refactored openstack-stack role to: - Convert static heat template files to ansible templates - Include native ansible groups via openstack metadata. This removes the need for a playbook to map host groups - Some code cleanup * Deleting commentd out code and irrelevant plays * Replacing stack parameters with jinja expressions * Updating sample inventory to work with latest dynamic inventory changes * updating inventory with host group mapping. 
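Combined with the env_id handling added earlier in common/pre_tasks, the two facts above compose the default wildcard application domain from three pieces. A worked example with illustrative values (the user name and epoch are made up):

```
# Illustrative values only
dns_domain: example.com
env_id: casl-jdoe-1487610981                 # default form: casl-<OS_USERNAME>-<epoch>
full_dns_domain: casl-jdoe-1487610981.example.com
openshift_app_domain: apps                   # default when not overridden
openshift_master_default_subdomain: apps.casl-jdoe-1487610981.example.com
```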
making sync keys optional * Missing cluster_hosts group * Updating to add infra_hosts * Updating inventory per comments from oybed and sabre1041 --- roles/openstack-stack/defaults/main.yml | 10 + roles/openstack-stack/files/heat_stack.yaml | 684 --------------------- roles/openstack-stack/files/heat_stack_server.yaml | 156 ----- roles/openstack-stack/files/user-data | 13 - roles/openstack-stack/tasks/main.yml | 59 +- roles/openstack-stack/templates/heat_stack.yaml.j2 | 551 +++++++++++++++++ .../templates/heat_stack_server.yaml.j2 | 170 +++++ roles/openstack-stack/templates/user_data.j2 | 13 + 8 files changed, 777 insertions(+), 879 deletions(-) create mode 100644 roles/openstack-stack/defaults/main.yml delete mode 100644 roles/openstack-stack/files/heat_stack.yaml delete mode 100644 roles/openstack-stack/files/heat_stack_server.yaml delete mode 100644 roles/openstack-stack/files/user-data create mode 100644 roles/openstack-stack/templates/heat_stack.yaml.j2 create mode 100644 roles/openstack-stack/templates/heat_stack_server.yaml.j2 create mode 100644 roles/openstack-stack/templates/user_data.j2 diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml new file mode 100644 index 000000000..8aefe039d --- /dev/null +++ b/roles/openstack-stack/defaults/main.yml @@ -0,0 +1,10 @@ +--- +dns_volume_size: 1 +ssh_ingress_cidr: 0.0.0.0/0 +node_ingress_cidr: 0.0.0.0/0 +num_etcd: 0 +num_masters: 1 +num_nodes: 1 +num_dns: 1 +num_infra: 1 +etcd_volume_size: 2 diff --git a/roles/openstack-stack/files/heat_stack.yaml b/roles/openstack-stack/files/heat_stack.yaml deleted file mode 100644 index 058f7a7ad..000000000 --- a/roles/openstack-stack/files/heat_stack.yaml +++ /dev/null @@ -1,684 +0,0 @@ -heat_template_version: 2014-10-16 - -description: OpenShift cluster - -parameters: - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - subnet_24_prefix: - type: string - label: subnet /24 prefix - description: /24 subnet prefix of the network of the cluster (dot separated number triplet) - - dns_nameservers: - type: comma_delimited_list - label: DNS nameservers list - description: List of DNS nameservers - - external_net: - type: string - label: External network - description: Name of the external network - default: external - - ssh_public_key: - type: string - label: SSH public key - description: SSH public key - hidden: true - - ssh_incoming: - type: string - label: Source of ssh connections - description: Source of legitimate ssh connections - default: 0.0.0.0/0 - - node_port_incoming: - type: string - label: Source of node port connections - description: Authorized sources targetting node ports - default: 0.0.0.0/0 - - num_etcd: - type: number - label: Number of etcd nodes - description: Number of etcd nodes - - num_masters: - type: number - label: Number of masters - description: Number of masters - - num_nodes: - type: number - label: Number of compute nodes - description: Number of compute nodes - - num_infra: - type: number - label: Number of infrastructure nodes - description: Number of infrastructure nodes - - num_dns: - type: number - label: Number of dns servers - description: Number of dns servers - - etcd_image: - type: string - label: Etcd image - description: Name of the image for the etcd servers - - master_image: - type: string - label: Master image - description: Name of the image for the master servers - - 
node_image: - type: string - label: Node image - description: Name of the image for the compute node servers - - infra_image: - type: string - label: Infra image - description: Name of the image for the infra node servers - - dns_image: - type: string - label: DNS image - description: Name of the image for the DNS server - - etcd_flavor: - type: string - label: Etcd flavor - description: Flavor of the etcd servers - - master_flavor: - type: string - label: Master flavor - description: Flavor of the master servers - - node_flavor: - type: string - label: Node flavor - description: Flavor of the compute node servers - - infra_flavor: - type: string - label: Infra flavor - description: Flavor of the infra node servers - - dns_flavor: - type: string - label: DNS flavor - description: Flavor of the DNS server - - master_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - app_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - infra_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - dns_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - etcd_volume_size: - type: number - description: Size of the volume to be created. - default: 5 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. 
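With the new defaults/main.yml shown at the start of this change, a caller only has to override the values that differ from the defaults; the image, flavor and DNS variables still need to be provided as in the test playbook. A sketch showing only the overrides, with placeholder values:

```
---
# Sketch: pulling in the role and overriding a few of the new defaults.
# Remaining required variables (images, flavors, dns_domain, etc.) are
# supplied as in stack-create-test.yml and are omitted here.
- hosts: localhost
  connection: local
  roles:
    - role: openstack-stack
      stack_name: ocp-test                  # placeholder
      num_nodes: 3                          # default is 1
      num_infra: 2                          # default is 1
      ssh_ingress_cidr: 10.0.0.0/8          # default is 0.0.0.0/0
      node_ingress_cidr: 0.0.0.0/0
```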
- -outputs: - - etcd_names: - description: Name of the etcds - value: { get_attr: [ etcd, name ] } - - etcd_ips: - description: IPs of the etcds - value: { get_attr: [ etcd, private_ip ] } - - etcd_floating_ips: - description: Floating IPs of the etcds - value: { get_attr: [ etcd, floating_ip ] } - - master_names: - description: Name of the masters - value: { get_attr: [ masters, name ] } - - master_ips: - description: IPs of the masters - value: { get_attr: [ masters, private_ip ] } - - master_floating_ips: - description: Floating IPs of the masters - value: { get_attr: [ masters, floating_ip ] } - - node_names: - description: Name of the nodes - value: { get_attr: [ compute_nodes, name ] } - - node_ips: - description: IPs of the nodes - value: { get_attr: [ compute_nodes, private_ip ] } - - node_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ compute_nodes, floating_ip ] } - - infra_names: - description: Name of the nodes - value: { get_attr: [ infra_nodes, name ] } - - infra_ips: - description: IPs of the nodes - value: { get_attr: [ infra_nodes, private_ip ] } - - infra_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ infra_nodes, floating_ip ] } - - dns_name: - description: Name of the DNS - value: - get_attr: - - dns - - name - - dns_floating_ip: - description: Floating IP of the DNS - value: - get_attr: - - dns - - addresses - - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - - 1 - - addr - -resources: - - net: - type: OS::Neutron::Net - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - - subnet: - type: OS::Neutron::Subnet - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-subnet - params: - cluster_id: { get_param: cluster_id } - network: { get_resource: net } - cidr: - str_replace: - template: subnet_24_prefix.0/24 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - allocation_pools: - - start: - str_replace: - template: subnet_24_prefix.3 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - end: - str_replace: - template: subnet_24_prefix.254 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - dns_nameservers: - - 10.9.48.31 -# - { get_param: dns_nameservers } -# repeat: -# for_each: -# <%nameserver%>: { get_param: dns_nameservers } -# template: <%nameserver%> - - router: - type: OS::Neutron::Router - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-router - params: - cluster_id: { get_param: cluster_id } - external_gateway_info: - network: { get_param: external_net } - - interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: subnet } - -# keypair: -# type: OS::Nova::KeyPair -# properties: -# name: -# str_replace: -# template: openshift-ansible-cluster_id-keypair -# params: -# cluster_id: { get_param: cluster_id } -# public_key: { get_param: ssh_public_key } - - master-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-master-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift cluster master - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: 
ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: 8443 - port_range_max: 8443 - - direction: ingress - protocol: tcp - port_range_min: 8444 - port_range_max: 8444 - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: tcp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: udp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: tcp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: tcp - port_range_min: 2224 - port_range_max: 2224 - - direction: ingress - protocol: udp - port_range_min: 5404 - port_range_max: 5404 - - direction: ingress - protocol: udp - port_range_min: 5405 - port_range_max: 5405 - - direction: ingress - protocol: tcp - port_range_min: 9090 - port_range_max: 9090 - - etcd-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-etcd-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id etcd cluster - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - remote_mode: remote_group_id - remote_group_id: { get_resource: master-secgrp } - - direction: ingress - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - remote_mode: remote_group_id - - node-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-node-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift cluster nodes - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: { get_param: node_port_incoming } - - infra-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-infra-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift infrastructure cluster nodes - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - direction: ingress - protocol: tcp - port_range_min: 443 - port_range_max: 443 - - dns-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - 
str_replace: - template: openshift-ansible-cluster_id-dns-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id cluster DNS - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: { get_param: node_port_incoming } - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: { get_param: node_port_incoming } - - etcd: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_etcd } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: etcd - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: etcd - image: { get_param: etcd_image } - flavor: { get_param: etcd_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: etcd-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: etcd_volume_size } - depends_on: - - interface - - masters: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_masters } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: master - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: master - image: { get_param: master_image } - flavor: { get_param: master_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: master-secgrp } - - { get_resource: node-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: master_volume_size } - depends_on: - - interface - - compute_nodes: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_nodes } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: subtype-k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: node - subtype: app - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: node - subtype: app - image: { get_param: node_image } - flavor: { get_param: node_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: app_volume_size } - depends_on: - - interface - - infra_nodes: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_infra } - 
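Below the security groups, each machine class (etcd, masters, compute_nodes, infra_nodes, dns) is the same construct: an OS::Heat::ResourceGroup that stamps out `count` copies of the nested per-server template, with `%index%` giving each copy a unique name. Stripped of the per-class details, the construct looks roughly like this (the group name is hypothetical):

```yaml
example_nodes:                        # illustrative; stands in for etcd/masters/compute_nodes/...
  type: OS::Heat::ResourceGroup
  properties:
    count: { get_param: num_nodes }   # how many copies of the nested template to create
    resource_def:
      type: heat_stack_server.yaml    # nested template; every property below is one of its parameters
      properties:
        name:
          str_replace:
            # %index% is expanded by ResourceGroup to 0, 1, 2, ... for each member
            template: k8s_type-%index%.cluster_id.cluster_env
            params:
              cluster_id: { get_param: cluster_id }
              k8s_type: node
              cluster_env: { get_param: cluster_env }
        type: node
        image: { get_param: node_image }
        flavor: { get_param: node_flavor }
        key_name: { get_param: ssh_public_key }
        net: { get_resource: net }
        subnet: { get_resource: subnet }
        secgrp:
          - { get_resource: node-secgrp }
        floating_network: { get_param: external_net }
  depends_on:
    - interface
```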
resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: subtypek8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: node - subtype: infra - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: node - subtype: infra - image: { get_param: infra_image } - flavor: { get_param: infra_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - - { get_resource: infra-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: infra_volume_size } - depends_on: - - interface - - dns: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_dns } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id.cluster_env - params: - cluster_id: { get_param: cluster_id } - k8s_type: dns - cluster_env: { get_param: cluster_env } - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: dns - image: { get_param: dns_image } - flavor: { get_param: dns_flavor } - key_name: { get_param: ssh_public_key } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - - { get_resource: dns-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - volume_size: { get_param: dns_volume_size } - depends_on: - - interface - diff --git a/roles/openstack-stack/files/heat_stack_server.yaml b/roles/openstack-stack/files/heat_stack_server.yaml deleted file mode 100644 index 978da4f0b..000000000 --- a/roles/openstack-stack/files/heat_stack_server.yaml +++ /dev/null @@ -1,156 +0,0 @@ -heat_template_version: 2014-10-16 - -description: OpenShift cluster server - -parameters: - - name: - type: string - label: Name - description: Name - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - type: - type: string - label: Type - description: Type master or node - - subtype: - type: string - label: Sub-type - description: Sub-type compute or infra for nodes, default otherwise - default: default - - key_name: - type: string - label: Key name - description: Key name of keypair - - image: - type: string - label: Image - description: Name of the image - - flavor: - type: string - label: Flavor - description: Name of the flavor - - net: - type: string - label: Net ID - description: Net resource - - net_name: - type: string - label: Net name - description: Net name - - subnet: - type: string - label: Subnet ID - description: Subnet resource - - secgrp: - type: comma_delimited_list - label: Security groups - description: Security group resources - - floating_network: - type: string - label: Floating network - description: Network to allocate floating IP from - - availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - - volume_size: - type: number - description: Size of the volume to be created. 
- default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - -outputs: - - name: - description: Name of the server - value: { get_attr: [ server, name ] } - - private_ip: - description: Private IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 0 - - addr - - floating_ip: - description: Floating IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 1 - - addr - -resources: - - server: - type: OS::Nova::Server - properties: - name: { get_param: name } - key_name: { get_param: key_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: - - port: { get_resource: port } - user_data: { get_file: user-data } - user_data_format: RAW - metadata: - environment: { get_param: cluster_env } - clusterid: { get_param: cluster_id } - host-type: { get_param: type } - sub-host-type: { get_param: subtype } - - port: - type: OS::Neutron::Port - properties: - network: { get_param: net } - fixed_ips: - - subnet: { get_param: subnet } - security_groups: { get_param: secgrp } - - floating-ip: - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: floating_network } - port_id: { get_resource: port } - - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { get_resource: server } - mountpoint: /dev/sdb diff --git a/roles/openstack-stack/files/user-data b/roles/openstack-stack/files/user-data deleted file mode 100644 index eb65f7cec..000000000 --- a/roles/openstack-stack/files/user-data +++ /dev/null @@ -1,13 +0,0 @@ -#cloud-config -disable_root: true - -system_info: - default_user: - name: openshift - sudo: ["ALL=(ALL) NOPASSWD: ALL"] - -write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - content: | - Defaults:openshift !requiretty diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index efee08c0e..71c7bbe0d 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -1,34 +1,41 @@ --- +- name: create HOT stack template prefix + register: stack_template_pre + tempfile: + state: directory + prefix: casl-ansible + +- name: set template paths + set_fact: + stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" + server_template_path: "{{ stack_template_pre.path }}/server.yaml" + user_data_template_path: "{{ stack_template_pre.path }}/user-data" + +- name: generate HOT stack template from jinja2 template + template: + src: heat_stack.yaml.j2 + dest: "{{ stack_template_path }}" + +- name: generate HOT server template from jinja2 template + template: + src: heat_stack_server.yaml.j2 + dest: "{{ server_template_path }}" + +- name: generate user_data from jinja2 template + template: + src: user_data.j2 + dest: "{{ user_data_template_path }}" + - name: create stack ignore_errors: False register: stack_create os_stack: name: "{{ stack_name }}" state: present - template: 'roles/openstack-stack/files/heat_stack.yaml' + template: "{{ stack_template_path }}" wait: yes - parameters: - cluster_env: "{{ dns_domain }}" - cluster_id: "{{ stack_name }}" - subnet_24_prefix: "{{ subnet_prefix }}" - dns_nameservers: "{{ dns_nameservers }}" - external_net: "{{ external_network }}" - ssh_public_key: "{{ ssh_public_key 
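For readability, this is the resource wiring of the per-server template being deleted here (the Jinja2 replacement added later keeps the same shape): a Neutron port on the cluster subnet, a Nova server attached to that port and booted with the cloud-init user-data, a floating IP bound to the port, and a Cinder volume attached at /dev/sdb (the device the docker-storage-setup role later consumes). A trimmed sketch, metadata omitted:

```yaml
resources:
  port:                                # fixed IP on the cluster subnet
    type: OS::Neutron::Port
    properties:
      network: { get_param: net }
      fixed_ips:
        - subnet: { get_param: subnet }
      security_groups: { get_param: secgrp }

  server:                              # boots the image and joins the port
    type: OS::Nova::Server
    properties:
      name: { get_param: name }
      image: { get_param: image }
      flavor: { get_param: flavor }
      key_name: { get_param: key_name }
      networks:
        - port: { get_resource: port }
      user_data: { get_file: user-data }
      user_data_format: RAW

  floating-ip:                         # public address bound to the same port
    type: OS::Neutron::FloatingIP
    properties:
      floating_network: { get_param: floating_network }
      port_id: { get_resource: port }

  cinder_volume:                       # extra block device for docker storage
    type: OS::Cinder::Volume
    properties:
      size: { get_param: volume_size }

  volume_attachment:
    type: OS::Cinder::VolumeAttachment
    properties:
      volume_id: { get_resource: cinder_volume }
      instance_uuid: { get_resource: server }
      mountpoint: /dev/sdb
```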
}}" - num_etcd: "{{ num_etcd }}" - num_masters: "{{ num_masters }}" - num_nodes: "{{ num_nodes }}" - num_infra: "{{ num_infra }}" - num_dns: "{{ num_dns }}" - etcd_image: "{{ openstack_image }}" - master_image: "{{ openstack_image }}" - node_image: "{{ openstack_image }}" - infra_image: "{{ openstack_image }}" - dns_image: "{{ openstack_image }}" - etcd_flavor: "{{ etcd_flavor }}" - master_flavor: "{{ master_flavor }}" - node_flavor: "{{ node_flavor }}" - infra_flavor: "{{ infra_flavor }}" - dns_flavor: "{{ dns_flavor }}" - master_volume_size: "{{ master_volume_size }}" - app_volume_size: "{{ app_volume_size }}" - infra_volume_size: "{{ infra_volume_size }}" + +- name: cleanup temp files + file: + path: "{{ stack_template_pre.path }}" + state: absent diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 new file mode 100644 index 000000000..bc9547f66 --- /dev/null +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -0,0 +1,551 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster + +parameters: + +outputs: + + etcd_names: + description: Name of the etcds + value: { get_attr: [ etcd, name ] } + + etcd_ips: + description: IPs of the etcds + value: { get_attr: [ etcd, private_ip ] } + + etcd_floating_ips: + description: Floating IPs of the etcds + value: { get_attr: [ etcd, floating_ip ] } + + master_names: + description: Name of the masters + value: { get_attr: [ masters, name ] } + + master_ips: + description: IPs of the masters + value: { get_attr: [ masters, private_ip ] } + + master_floating_ips: + description: Floating IPs of the masters + value: { get_attr: [ masters, floating_ip ] } + + node_names: + description: Name of the nodes + value: { get_attr: [ compute_nodes, name ] } + + node_ips: + description: IPs of the nodes + value: { get_attr: [ compute_nodes, private_ip ] } + + node_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ compute_nodes, floating_ip ] } + + infra_names: + description: Name of the nodes + value: { get_attr: [ infra_nodes, name ] } + + infra_ips: + description: IPs of the nodes + value: { get_attr: [ infra_nodes, private_ip ] } + + infra_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ infra_nodes, floating_ip ] } + + dns_name: + description: Name of the DNS + value: + get_attr: + - dns + - name + + dns_floating_ip: + description: Floating IP of the DNS + value: + get_attr: + - dns + - addresses + - str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + - 1 + - addr + +resources: + + net: + type: OS::Neutron::Net + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + + subnet: + type: OS::Neutron::Subnet + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-subnet + params: + cluster_id: {{ stack_name }} + network: { get_resource: net } + cidr: + str_replace: + template: subnet_24_prefix.0/24 + params: + subnet_24_prefix: {{ subnet_prefix }} + allocation_pools: + - start: + str_replace: + template: subnet_24_prefix.3 + params: + subnet_24_prefix: {{ subnet_prefix }} + end: + str_replace: + template: subnet_24_prefix.254 + params: + subnet_24_prefix: {{ subnet_prefix }} + dns_nameservers: + {% for nameserver in dns_nameservers %} + - {{ nameserver }} + {% endfor %} + + router: + type: OS::Neutron::Router + properties: + name: + str_replace: + template: 
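The new task flow above renders the Jinja2 templates into a temporary directory, feeds the rendered stack template to os_stack, then removes the directory. One way to guarantee that cleanup even when stack creation fails, sketched here as a variation rather than what the change itself does, is to wrap the steps in a block with an `always` section (trimmed to the stack template only):

```yaml
- block:
    - name: create HOT stack template prefix
      tempfile:
        state: directory
        prefix: casl-ansible
      register: stack_template_pre

    - name: generate HOT stack template from jinja2 template
      template:
        src: heat_stack.yaml.j2
        dest: "{{ stack_template_pre.path }}/stack.yaml"

    - name: create stack
      os_stack:
        name: "{{ stack_name }}"
        state: present
        template: "{{ stack_template_pre.path }}/stack.yaml"
        wait: yes
      register: stack_create
  always:
    - name: cleanup temp files
      file:
        path: "{{ stack_template_pre.path }}"
        state: absent
```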
openshift-ansible-cluster_id-router + params: + cluster_id: {{ stack_name }} + external_gateway_info: + network: {{ external_network }} + + interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: subnet } + +# keypair: +# type: OS::Nova::KeyPair +# properties: +# name: +# str_replace: +# template: openshift-ansible-cluster_id-keypair +# params: +# cluster_id: {{ stack_name }} +# public_key: {{ ssh_public_key }} + + master-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-master-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster master + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8443 + - direction: ingress + protocol: tcp + port_range_min: 8444 + port_range_max: 8444 + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5404 + - direction: ingress + protocol: udp + port_range_min: 5405 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + + etcd-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-etcd-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id etcd cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + remote_mode: remote_group_id + + node-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-node-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + 
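With the move to Jinja2, values such as {{ ssh_ingress_cidr }} and {{ node_ingress_cidr }} are substituted at render time from role defaults (both default to 0.0.0.0/0, as the defaults/main.yml change further down shows) rather than being passed as Heat parameters. A sketch of tightening them per environment in the inventory; the CIDRs are placeholders only:

```yaml
# group_vars/all.yml (illustrative values)
ssh_ingress_cidr: 203.0.113.0/24     # where SSH (tcp/22) may originate
node_ingress_cidr: 198.51.100.0/24   # where NodePort traffic (tcp/30000-32767) and DNS queries may originate
```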
remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ node_ingress_cidr }} + + infra-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-infra-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift infrastructure cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 + + dns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-dns-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id cluster DNS + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + + etcd: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_etcd }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: etcd + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: etcds + cluster_id: {{ stack_name }} + type: etcd + image: {{ openstack_image }} + flavor: {{ etcd_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: etcd-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ etcd_volume_size }} + depends_on: + - interface + + masters: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_masters }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: master + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: masters + cluster_id: {{ stack_name }} + type: master + image: {{ openstack_image }} + flavor: {{ master_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: master-secgrp } + - { get_resource: node-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ master_volume_size }} + depends_on: + - interface + + compute_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_nodes }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: subtype-k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: node + subtype: app + cluster_env: 
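Each ResourceGroup member now also receives a `group` property of the form k8s_type.cluster_id. The server template stores it in the Nova metadata, and the openstack.py dynamic inventory further down promotes the metadata `group` key to an Ansible group, which is how the static hosts file can list groups such as masters.openshift.example.com as children. Roughly, a master's metadata ends up as follows (values follow the sample inventory shown later):

```yaml
metadata:
  group: masters.openshift.example.com   # promoted to an inventory group by openstack.py
  environment: example.com               # cluster_env, i.e. the DNS domain
  clusterid: openshift.example.com       # the stack name (env_id plus domain)
  host-type: master
  sub-host-type: default
```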
{{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: nodes + cluster_id: {{ stack_name }} + type: node + subtype: app + node_labels: + region: primary + image: {{ openstack_image }} + flavor: {{ node_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ app_volume_size }} + depends_on: + - interface + + infra_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_infra }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: subtypek8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: node + subtype: infra + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: infra + cluster_id: {{ stack_name }} + type: node + subtype: infra + node_labels: + region: infra + image: {{ openstack_image }} + flavor: {{ infra_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: infra-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ infra_volume_size }} + depends_on: + - interface + + dns: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_dns }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: dns + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: dns + cluster_id: {{ stack_name }} + type: dns + image: {{ openstack_image }} + flavor: {{ dns_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: node-secgrp } + - { get_resource: dns-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ dns_volume_size }} + depends_on: + - interface + diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 new file mode 100644 index 000000000..5851d3b9b --- /dev/null +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -0,0 +1,170 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster server + +parameters: + + name: + type: string + label: Name + description: Name + + group: + type: string + label: Host Group + description: The Primary Ansible Host Group + default: host + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: 
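The compute and infra groups also pass node_labels (region: primary and region: infra respectively) into the server metadata, and the sample OSEv3 group vars further down consume exactly that metadata, so the labels set here are what openshift-ansible applies to each node. A condensed view of the two ends of that hand-off:

```yaml
# heat_stack.yaml.j2 (per compute node)
node_labels:
  region: primary

# group_vars/OSEv3.yml (from the sample inventory below)
openshift_node_labels: "{{ openstack.metadata.node_labels }}"
osm_default_node_selector: 'region=primary'
```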
string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + + subnet: + type: string + label: Subnet ID + description: Subnet resource + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group resources + + floating_network: + type: string + label: Floating network + description: Network to allocate floating IP from + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. + default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + node_labels: + type: json + description: OpenShift Node Labels + default: {"region": "default" } + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 0 + - addr + + floating_ip: + description: Floating IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 1 + - addr + +resources: + + server: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: { get_resource: port } + user_data: + get_file: user-data + user_data_format: RAW + metadata: + group: { get_param: group } + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + node_labels: { get_param: node_labels } + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } + fixed_ips: + - subnet: { get_param: subnet } + security_groups: { get_param: secgrp } + + floating-ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: floating_network } + port_id: { get_resource: port } + + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server } + mountpoint: /dev/sdb diff --git a/roles/openstack-stack/templates/user_data.j2 b/roles/openstack-stack/templates/user_data.j2 new file mode 100644 index 000000000..eb65f7cec --- /dev/null +++ b/roles/openstack-stack/templates/user_data.j2 @@ -0,0 +1,13 @@ +#cloud-config +disable_root: true + +system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + +write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + content: | + Defaults:openshift !requiretty -- cgit v1.2.3 -- cgit v1.2.3 From 7304ed4611192f6daa88f84d8b47d3e76514a03b Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Thu, 27 Apr 2017 16:58:41 -0400 Subject: First attempt at a simple multi-master support (#39) * First attempt at a simple multi-master support * Removing unneeded inventory * adding default number of masters and lower number of nodes --- roles/openstack-stack/defaults/main.yml | 2 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 69 ++++++++++++++++++++++ 2 files changed, 71 insertions(+) 
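The change below introduces an optional load balancer: when num_masters is greater than 1 the template gains an lb-secgrp and a single loadbalancer server, and when no dedicated etcd nodes are requested the masters additionally join etcd-secgrp. A hedged sketch of inventory variables that would exercise the multi-master path; the names come from the provisioning playbook and role defaults, the values are examples only:

```yaml
# group_vars/all.yml (illustrative)
openstack_num_masters: 3          # > 1 triggers lb-secgrp and the loadbalancer ResourceGroup
openstack_num_infra: 2
openstack_num_nodes: 3
openstack_lb_flavor: "m1.small"   # passed through as lb_flavor (defaults to m1.small)
lb_ingress_cidr: 0.0.0.0/0        # who may reach the master API through the load balancer
openshift_master_api_port: 8443   # port opened on the lb security group (default 8443)
```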
diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 8aefe039d..2a4ef3a45 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -2,6 +2,8 @@ dns_volume_size: 1 ssh_ingress_cidr: 0.0.0.0/0 node_ingress_cidr: 0.0.0.0/0 +master_ingress_cidr: 0.0.0.0/0 +lb_ingress_cidr: 0.0.0.0/0 num_etcd: 0 num_masters: 1 num_nodes: 1 diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index bc9547f66..c367aabe7 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -342,6 +342,31 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: {{ node_ingress_cidr }} +{% if num_masters is greaterthan 1 %} + lb-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: openshift-ansible-{{ stack_name }}-lb-secgrp + description: Security group for {{ stack_name }} cluster Load Balancer + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr }} + {% if openshift_master_console_port is defined and openshift_master_console_port is not equalto openshift_master_api_port %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port | default(8443) }} + port_range_max: {{ openshift_master_console_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr }} + {% endif %} +{% endif %} etcd: type: OS::Heat::ResourceGroup @@ -382,6 +407,47 @@ resources: depends_on: - interface +{% if num_masters is greaterthan 1 %} + loadbalancer: + type: OS::Heat::ResourceGroup + properties: + count: 1 + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: lb + cluster_env: {{ dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: lb + cluster_id: {{ stack_name }} + type: lb + image: {{ openstack_image }} + flavor: {{ lb_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: lb-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: 5 + depends_on: + - interface +{% endif %} + masters: type: OS::Heat::ResourceGroup properties: @@ -412,6 +478,9 @@ resources: secgrp: - { get_resource: master-secgrp } - { get_resource: node-secgrp } +{% if num_etcd is equalto 0 %} + - { get_resource: etcd-secgrp } +{% endif %} floating_network: {{ external_network }} net_name: str_replace: -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 215807f53977bc561b3791e37136f140383605ef Mon Sep 17 00:00:00 2001 From: Eduardo Minguez Perez Date: Tue, 16 May 2017 16:21:10 +0200 Subject: Removed hardcoded values from ansible 
roles --- roles/docker-storage-setup/defaults/main.yaml | 5 +++++ roles/docker-storage-setup/files/docker-storage-setup | 4 ---- roles/docker-storage-setup/tasks/main.yaml | 4 ++-- roles/docker-storage-setup/templates/docker-storage-setup.j2 | 4 ++++ 4 files changed, 11 insertions(+), 6 deletions(-) create mode 100644 roles/docker-storage-setup/defaults/main.yaml delete mode 100644 roles/docker-storage-setup/files/docker-storage-setup create mode 100644 roles/docker-storage-setup/templates/docker-storage-setup.j2 diff --git a/roles/docker-storage-setup/defaults/main.yaml b/roles/docker-storage-setup/defaults/main.yaml new file mode 100644 index 000000000..e36f1b85a --- /dev/null +++ b/roles/docker-storage-setup/defaults/main.yaml @@ -0,0 +1,5 @@ +--- +docker_dev: "/dev/sdb" +docker_vg: "docker-vol" +docker_data_size: "95%VG" +docker_dm_basesize: "3G" diff --git a/roles/docker-storage-setup/files/docker-storage-setup b/roles/docker-storage-setup/files/docker-storage-setup deleted file mode 100644 index 5e9d494a1..000000000 --- a/roles/docker-storage-setup/files/docker-storage-setup +++ /dev/null @@ -1,4 +0,0 @@ -DEVS=/dev/sdb -VG=docker-vol -DATA_SIZE=95%VG -EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize=3G" diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 17b13f27f..7202bc46b 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: create the docker-storage-setup config file - copy: - src: docker-storage-setup + template: + src: "{{ role_path }}/templates/docker-storage-setup.j2" dest: /etc/sysconfig/docker-storage-setup owner: root group: root diff --git a/roles/docker-storage-setup/templates/docker-storage-setup.j2 b/roles/docker-storage-setup/templates/docker-storage-setup.j2 new file mode 100644 index 000000000..a5203d7e4 --- /dev/null +++ b/roles/docker-storage-setup/templates/docker-storage-setup.j2 @@ -0,0 +1,4 @@ +DEVS="{{ docker_devs }}" +VG="{{ docker_vg }}" +DATA_SIZE="{{ docker_data_size }}" +EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From c9305e842efb4098477e249d2bea87a73a989f0c Mon Sep 17 00:00:00 2001 From: Peter Schiffer Date: Thu, 18 May 2017 19:14:11 +0200 Subject: More ansible migration and deploy OCP from local workstation (#376) * Create registry bucket with deployment manager * Migrate ssh proxy to Ansible * Update gce dynamic inventory script, use instance name for ssh * Fix variable name in docker storage setup role * Deploy OCP from local workstation, and not from the bastion host --- roles/docker-storage-setup/templates/docker-storage-setup.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/docker-storage-setup/templates/docker-storage-setup.j2 b/roles/docker-storage-setup/templates/docker-storage-setup.j2 index a5203d7e4..b5869feff 100644 --- a/roles/docker-storage-setup/templates/docker-storage-setup.j2 +++ b/roles/docker-storage-setup/templates/docker-storage-setup.j2 @@ -1,4 +1,4 @@ -DEVS="{{ docker_devs }}" +DEVS="{{ docker_dev }}" VG="{{ docker_vg }}" DATA_SIZE="{{ docker_data_size }}" EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit 
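The docker-storage-setup role above now renders /etc/sysconfig/docker-storage-setup from role defaults instead of shipping a fixed file (note the follow-up commit just after, which corrects docker_devs to docker_dev). The defaults can be overridden per environment; an illustrative group_vars sketch with placeholder values:

```yaml
# group_vars/all.yml (illustrative values)
docker_dev: "/dev/vdb"        # block device presented to the instance for docker storage
docker_vg: "docker-vol"       # volume group to create
docker_data_size: "95%VG"
docker_dm_basesize: "10G"     # devicemapper base size per container
```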
v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 469a88f6d7609df5ffaab812093e0c58baa3be29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Mon, 5 Jun 2017 16:47:13 -0400 Subject: Conditionally set the openshift_master_default_subdomain to avoid overriding it unecessary (#47) --- roles/common/pre_tasks/pre_tasks.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index 06a56605d..cc4e64a0f 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -34,3 +34,5 @@ set_fact: openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" delegate_to: localhost + when: + - openshift_master_default_subdomain is undefined -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 22e88c9ce8f81cb13c3d050455d332161a1acd83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Tue, 13 Jun 2017 15:35:22 -0400 Subject: Update CASL to use nsupdate for DNS records (#48) * Updated to use nsupdate for DNS records * Updated formatting of dict * Updating descriptive text * Support for external DNS config * Upgrading jinja2 to work correctly with latest templates * Latest update for nsupdate * Updated to use nsupdate for DNS records * Updated formatting of dict * Updating descriptive text * Support for external DNS config * Latest update for nsupdate * Updated to support external public/private DNS server(s) * Updated DNS server handling * Updated DNS server handling * Updated DNS server handling * Eliminated the from the sample inventories * Updated sample inventory to point to 2 separate DNS servers for private/public * Playbook clean-up * Adding 'python-dns' * splitting subscription manager calls to allow for a clean pre-install playbook --- roles/common/pre_tasks/pre_tasks.yml | 2 +- roles/dns-server-detect/defaults/main.yml | 3 ++ roles/dns-server-detect/tasks/main.yml | 38 ++++++++++++++++++++++ roles/openstack-stack/templates/heat_stack.yaml.j2 | 12 +++---- roles/openstack-stack/test/stack-create-test.yml | 4 +-- 5 files changed, 50 insertions(+), 9 deletions(-) create mode 100644 roles/dns-server-detect/defaults/main.yml create mode 100644 roles/dns-server-detect/tasks/main.yml diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml index cc4e64a0f..c5e79e89c 100644 --- a/roles/common/pre_tasks/pre_tasks.yml +++ b/roles/common/pre_tasks/pre_tasks.yml @@ -22,7 +22,7 @@ - name: Updating DNS domain to include env_id (if not empty) set_fact: - full_dns_domain: "{{ (env_id|trim == '') | ternary(dns_domain, env_id + '.' + dns_domain) }}" + full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' 
+ public_dns_domain) }}" delegate_to: localhost - name: Set the APP domain for OpenShift use diff --git a/roles/dns-server-detect/defaults/main.yml b/roles/dns-server-detect/defaults/main.yml new file mode 100644 index 000000000..58bd861cd --- /dev/null +++ b/roles/dns-server-detect/defaults/main.yml @@ -0,0 +1,3 @@ +--- + +external_nsupdate_keys: {} diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml new file mode 100644 index 000000000..e8dd0acf0 --- /dev/null +++ b/roles/dns-server-detect/tasks/main.yml @@ -0,0 +1,38 @@ +--- + +- fail: + msg: 'Missing required private DNS server(s)' + when: + - external_nsupdate_keys['private'] is undefined + - hostvars[groups['dns'][0]] is undefined + +- fail: + msg: 'Missing required public DNS server(s)' + when: + - external_nsupdate_keys['public'] is undefined + - hostvars[groups['dns'][0]] is undefined + +- name: "Set the private DNS server to use the external value (if provided)" + set_fact: + private_dns_server: "{{ external_nsupdate_keys['private']['server'] }}" + when: + - external_nsupdate_keys['private'] is defined + +- name: "Set the private DNS server to use the provisioned value" + set_fact: + private_dns_server: "{{ hostvars[groups['dns'][0]].openstack.private_v4 }}" + when: + - private_dns_server is undefined + +- name: "Set the public DNS server to use the external value (if provided)" + set_fact: + public_dns_server: "{{ external_nsupdate_keys['public']['server'] }}" + when: + - external_nsupdate_keys['public'] is defined + +- name: "Set the public DNS server to use the provisioned value" + set_fact: + public_dns_server: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" + when: + - public_dns_server is undefined + diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c367aabe7..09b62cba7 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -381,7 +381,7 @@ resources: params: cluster_id: {{ stack_name }} k8s_type: etcd - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: @@ -421,7 +421,7 @@ resources: params: cluster_id: {{ stack_name }} k8s_type: lb - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: @@ -461,7 +461,7 @@ resources: params: cluster_id: {{ stack_name }} k8s_type: master - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: @@ -505,7 +505,7 @@ resources: cluster_id: {{ stack_name }} k8s_type: node subtype: app - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: @@ -548,7 +548,7 @@ resources: cluster_id: {{ stack_name }} k8s_type: node subtype: infra - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: @@ -591,7 +591,7 @@ resources: params: cluster_id: {{ stack_name }} k8s_type: dns - cluster_env: {{ dns_domain }} + cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: str_replace: diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml index 94e312ee3..6cbd7ff30 100644 --- a/roles/openstack-stack/test/stack-create-test.yml +++ b/roles/openstack-stack/test/stack-create-test.yml @@ -3,8 +3,8 @@ roles: - role: openstack-stack stack_name: 
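The dns-server-detect role above chooses between externally managed DNS servers and the DNS host the stack provisions: if external_nsupdate_keys supplies a private or public entry that server wins, otherwise the first host in the dns group is used. The DNS record playbooks further down read key_secret and key_algorithm from the same structure, so a complete external definition looks roughly like this (all values are placeholders; supply your own servers and TSIG keys):

```yaml
# group_vars/all.yml (illustrative)
external_nsupdate_keys:
  private:
    server: "192.168.0.2"
    key_secret: "<base64-encoded TSIG key>"
    key_algorithm: "hmac-md5"
  public:
    server: "192.0.2.10"
    key_secret: "<base64-encoded TSIG key>"
    key_algorithm: "hmac-md5"
```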
test-stack - dns_domain: "{{ openstack_dns_domain }}" - dns_nameservers: "{{ openstack_nameservers }}" + dns_domain: "{{ public_dns_domain }}" + dns_nameservers: "{{ public_dns_nameservers }}" subnet_prefix: "{{ openstack_subnet_prefix }}" ssh_public_key: "{{ openstack_ssh_public_key }}" openstack_image: "{{ openstack_default_image_name }}" -- cgit v1.2.3 -- cgit v1.2.3 From 7f60edeba48d78cd01669d20019e9bdacdf4e305 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 14:06:52 +0200 Subject: Move the openstack provisioning playbooks They'll live in playbooks/provisioning/openstack from now on. --- .../openstack/openstack_dns_records.yml | 77 ++++++++++++++++++++++ .../provisioning/openstack/openstack_dns_views.yml | 27 ++++++++ .../openstack/post-provision-openstack.yml | 60 +++++++++++++++++ playbooks/provisioning/openstack/pre-install.yml | 15 +++++ .../provisioning/openstack/provision-openstack.yml | 48 ++++++++++++++ 5 files changed, 227 insertions(+) create mode 100644 playbooks/provisioning/openstack/openstack_dns_records.yml create mode 100644 playbooks/provisioning/openstack/openstack_dns_views.yml create mode 100644 playbooks/provisioning/openstack/post-provision-openstack.yml create mode 100644 playbooks/provisioning/openstack/pre-install.yml create mode 100644 playbooks/provisioning/openstack/provision-openstack.yml diff --git a/playbooks/provisioning/openstack/openstack_dns_records.yml b/playbooks/provisioning/openstack/openstack_dns_records.yml new file mode 100644 index 000000000..b1008fe33 --- /dev/null +++ b/playbooks/provisioning/openstack/openstack_dns_records.yml @@ -0,0 +1,77 @@ +--- + +- name: "Generate list of private A records" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['openstack']['private_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +- name: "Set the private DNS server to use the external value (if provided)" + set_fact: + nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" + nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" + nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['private'] is defined + +- name: "Set the private DNS server to use the provisioned value" + set_fact: + nsupdate_server_private: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" + nsupdate_key_secret_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_secret }}" + nsupdate_key_algorithm_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_algorithm }}" + when: + - nsupdate_server_private is undefined + +- name: "Generate the private Add section for DNS" + set_fact: + private_named_records: + - view: "private" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_private }}" + key_name: "{{ ( 'private-' + full_dns_domain ) }}" + key_secret: "{{ nsupdate_key_secret_private }}" + key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" + entries: "{{ private_records }}" + +- name: "Generate list of public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['openstack']['public_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +- name: "Add wildcard records to the public A records" 
+ set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['openstack']['public_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + +- name: "Set the public DNS server details to use the external value (if provided)" + set_fact: + nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" + nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" + nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['public'] is defined + +- name: "Set the public DNS server details to use the provisioned value" + set_fact: + nsupdate_server_public: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" + nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" + nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" + when: + - nsupdate_server_public is undefined + +- name: "Generate the public Add section for DNS" + set_fact: + public_named_records: + - view: "public" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_public }}" + key_name: "{{ ( 'public-' + full_dns_domain ) }}" + key_secret: "{{ nsupdate_key_secret_public }}" + key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" + entries: "{{ public_records }}" + +- name: "Generate the final dns_records_add" + set_fact: + dns_records_add: "{{ private_named_records + public_named_records }}" + diff --git a/playbooks/provisioning/openstack/openstack_dns_views.yml b/playbooks/provisioning/openstack/openstack_dns_views.yml new file mode 100644 index 000000000..611ed9f82 --- /dev/null +++ b/playbooks/provisioning/openstack/openstack_dns_views.yml @@ -0,0 +1,27 @@ +--- + +- name: "Generate ACL list for DNS server" + set_fact: + acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['openstack']['private_v4'] + '/32') ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +- name: "Generate the private view" + set_fact: + private_named_view: + - name: "private" + acl_entry: "{{ acl_list }}" + zone: + - dns_domain: "{{ full_dns_domain }}" + +- name: "Generate the public view" + set_fact: + public_named_view: + - name: "public" + zone: + - dns_domain: "{{ full_dns_domain }}" + forwarder: "{{ public_dns_nameservers }}" + +- name: "Generate the final named_config_views" + set_fact: + named_config_views: "{{ private_named_view + public_named_view }}" + diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml new file mode 100644 index 000000000..d65e075b8 --- /dev/null +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -0,0 +1,60 @@ +--- + +# Assign hostnames +- hosts: cluster_hosts + pre_tasks: + - include: roles/common/pre_tasks/pre_tasks.yml + roles: + - role: hostnames + +# Subscribe DNS Host to allow for configuration below +- hosts: dns + roles: + - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager', ansible_sudo: true } + +# Determine which DNS server(s) to use for our generated records +- hosts: localhost + roles: + - dns-server-detect + +# Build the DNS Server Views and Configure DNS Server(s) +- hosts: dns + pre_tasks: + - include: roles/common/pre_tasks/pre_tasks.yml + - name: "Generate dns-server views" + include: 
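Put together, the record-generation playbook above produces one "add" section per view and concatenates them into dns_records_add. A single private entry would look roughly like this; hostnames, addresses and secrets are placeholders:

```yaml
dns_records_add:
  - view: private
    zone: openshift.example.com                 # full_dns_domain (env_id plus public_dns_domain)
    server: 192.168.99.5                        # DNS server selected by the plays above
    key_name: private-openshift.example.com
    key_secret: "<base64-encoded TSIG key>"
    key_algorithm: hmac-md5
    entries:
      - type: A
        hostname: master-0
        ip: 192.168.99.10
```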
openstack_dns_views.yml + roles: + - role: dns-server + +# Build and process DNS Records +- hosts: localhost + pre_tasks: + - include: roles/common/pre_tasks/pre_tasks.yml + - name: "Generate dns records" + include: openstack_dns_records.yml + roles: + - role: dns + +# Use newly configured DNS server for this container ... +- hosts: localhost + tasks: + - name: "Edit /etc/resolv.conf in container" + shell: "sed '0,/.*nameserver.*/s/.*nameserver.*/nameserver {{ public_dns_server }} \\n&/' /etc/resolv.conf > /tmp/resolv.conf && /bin/cp -f /tmp/resolv.conf /etc/resolv.conf" + +# OpenShift Pre-Requisites +- hosts: OSEv3 + tasks: + - name: "Edit /etc/resolv.conf on masters/nodes" + lineinfile: + state: present + dest: /etc/resolv.conf + regexp: "nameserver {{ hostvars['localhost'].private_dns_server }}" + line: "nameserver {{ hostvars['localhost'].private_dns_server }}" + insertafter: search* + - name: "Include DNS configuration to ensure proper name resolution" + lineinfile: + state: present + dest: /etc/sysconfig/network + regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" + line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" + diff --git a/playbooks/provisioning/openstack/pre-install.yml b/playbooks/provisioning/openstack/pre-install.yml new file mode 100644 index 000000000..8225287f9 --- /dev/null +++ b/playbooks/provisioning/openstack/pre-install.yml @@ -0,0 +1,15 @@ +--- + +############################### +# OpenShift Pre-Requisites + +# - subscribe hosts +# - prepare docker +# - other prep (install additional packages, etc.) +# +- hosts: OSEv3 + roles: + - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager', ansible_sudo: true } + - { role: docker, tags: 'docker' } + - { role: openshift-prep, tags: 'openshift-prep' } + diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml new file mode 100644 index 000000000..8125548fd --- /dev/null +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -0,0 +1,48 @@ +--- +- hosts: localhost + pre_tasks: + - include: roles/common/pre_tasks/pre_tasks.yml + roles: + - role: openstack-stack + stack_name: "{{ env_id }}.{{ public_dns_domain }}" + dns_domain: "{{ public_dns_domain }}" + dns_nameservers: "{{ public_dns_nameservers }}" + subnet_prefix: "{{ openstack_subnet_prefix }}" + ssh_public_key: "{{ openstack_ssh_public_key }}" + openstack_image: "{{ openstack_default_image_name }}" + lb_flavor: "{{ openstack_lb_flavor | default('m1.small') }}" + etcd_flavor: "{{ openstack_default_flavor }}" + master_flavor: "{{ openstack_default_flavor }}" + node_flavor: "{{ openstack_default_flavor }}" + infra_flavor: "{{ openstack_default_flavor }}" + dns_flavor: "{{ openstack_dns_flavor | default('m1.small') }}" + external_network: "{{ openstack_external_network_name }}" + num_etcd: 0 + num_masters: "{{ openstack_num_masters }}" + num_nodes: "{{ openstack_num_nodes }}" + num_infra: "{{ openstack_num_infra }}" + num_dns: "{{ openstack_num_dns | default(1) }}" + master_volume_size: "{{ docker_volume_size }}" + app_volume_size: "{{ docker_volume_size }}" + infra_volume_size: "{{ docker_volume_size }}" + + +- name: Refresh Server inventory + hosts: localhost + connection: local + gather_facts: False + tasks: + - meta: refresh_inventory + +- hosts: cluster_hosts + gather_facts: false + tasks: + - name: Debug hostvar + debug: + msg: "{{ hostvars[inventory_hostname] }}" + verbosity: 2 + - name: waiting 
for server to come back + local_action: wait_for host={{ hostvars[inventory_hostname]['ansible_ssh_host'] }} port=22 delay=30 timeout=300 + become: false + +- include: post-provision-openstack.yml -- cgit v1.2.3 From 75add72e9737c2c404f7501f6b3ee678e877b59f Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 14:09:31 +0200 Subject: Add a single provisioning playbook --- playbooks/provisioning/openstack/provision.yaml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 playbooks/provisioning/openstack/provision.yaml diff --git a/playbooks/provisioning/openstack/provision.yaml b/playbooks/provisioning/openstack/provision.yaml new file mode 100644 index 000000000..7cde5e8b8 --- /dev/null +++ b/playbooks/provisioning/openstack/provision.yaml @@ -0,0 +1,4 @@ +--- +- include: "provision-openstack.yml" + +- include: "pre-install.yml" -- cgit v1.2.3 From 034be45ada3522966e382d6e51c4b05d7829dec6 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 14:10:34 +0200 Subject: Symlink roles to provisioning/openstack/roles --- playbooks/provisioning/openstack/roles | 1 + 1 file changed, 1 insertion(+) create mode 120000 playbooks/provisioning/openstack/roles diff --git a/playbooks/provisioning/openstack/roles b/playbooks/provisioning/openstack/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/provisioning/openstack/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file -- cgit v1.2.3 From 5328475c0e0cbcb9f622a35b5494def4f6409bf6 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 14:17:15 +0200 Subject: Add a sample inventory for openstack provisioning --- .../openstack/sample-inventory/clouds.yaml | 5 + .../sample-inventory/group_vars/OSEv3.yml | 10 + .../openstack/sample-inventory/group_vars/all.yml | 39 ++++ .../provisioning/openstack/sample-inventory/hosts | 44 ++++ .../openstack/sample-inventory/openstack.py | 252 +++++++++++++++++++++ 5 files changed, 350 insertions(+) create mode 100644 playbooks/provisioning/openstack/sample-inventory/clouds.yaml create mode 100644 playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml create mode 100644 playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml create mode 100644 playbooks/provisioning/openstack/sample-inventory/hosts create mode 100755 playbooks/provisioning/openstack/sample-inventory/openstack.py diff --git a/playbooks/provisioning/openstack/sample-inventory/clouds.yaml b/playbooks/provisioning/openstack/sample-inventory/clouds.yaml new file mode 100644 index 000000000..c266426c6 --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/clouds.yaml @@ -0,0 +1,5 @@ +ansible: + use_hostnames: True + expand_hostvars: True + fail_on_errors: True + diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml new file mode 100644 index 000000000..d850f88a4 --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -0,0 +1,10 @@ +--- +openshift_deployment_type: openshift-enterprise +openshift_release: v3.5 +openshift_master_default_subdomain: "apps.openshift.example.com" + +# NOTE(shadower): do not remove this line, otherwise the default node labels +# won't be set up. 
+openshift_node_labels: "{{ openstack.metadata.node_labels }}" + +osm_default_node_selector: 'region=primary' diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml new file mode 100644 index 000000000..50aaa573d --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -0,0 +1,39 @@ +env_id: "openshift" +openstack_dns_domain: "example.com" +openstack_nameservers: ["192.168.1.1"] +openstack_ssh_public_key: "openshift" +openstack_default_image_name: "rhel73" +openstack_default_flavor: "m1.medium" +openstack_external_network_name: "public" + +openstack_num_masters: 1 +openstack_num_infra: 1 +openstack_num_nodes: 2 + +docker_volume_size: "15" + +# TODO(shadower): this is identical to `openstack_dns_domain`. +# We should make it so it's not duplicated here. +dns_domain: "example.com" + +# TODO(shadower): this is identical to `openstack_nameservers`. +# We should make it so it's not duplicated here. +public_dns_forwarder: "192.168.1.1" + +openstack_subnet_prefix: "192.168.99" + +# # Red Hat subscription +# rhsm_register: True +# rhsm_repos: +# - "rhel-7-server-rpms" +# - "rhel-7-server-ose-3.5-rpms" +# - "rhel-7-server-extras-rpms" +# - "rhel-7-fast-datapath-rpms" +# rhsm_username: '' +# rhsm_password: '' +# rhsm_pool: '' + + +# NOTE(shadower): Do not change this value. The Ansible user is currently +# hardcoded to `openshift`. +ansible_user: openshift diff --git a/playbooks/provisioning/openstack/sample-inventory/hosts b/playbooks/provisioning/openstack/sample-inventory/hosts new file mode 100644 index 000000000..5f73b60f6 --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/hosts @@ -0,0 +1,44 @@ +#[all:vars] +# For all group_vars, see ./group_vars/all.yml + +# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups. +# The lb group lets Ansible configure HAProxy as the load balancing solution. +# Comment lb out if your load balancer is pre-configured. +[cluster_hosts:children] +OSEv3 +dns + +[OSEv3:children] +masters +nodes +etcd + +# Set variables common for all OSEv3 hosts +#[OSEv3:vars] + +# For OSEv3 normal group vars, see ./group_vars/OSEv3.yml + +# Host Groups + +[masters:children] +masters.openshift.example.com + +[etcd:children] +etcd.openshift.example.com + +[nodes:children] +masters +infra.openshift.example.com +nodes.openshift.example.com + +[infra_hosts:children] +infra.openshift.example.com + +[dns:children] +dns.openshift.example.com + +[masters.openshift.example.com] +[etcd.openshift.example.com] +[infra.openshift.example.com] +[nodes.openshift.example.com] +[dns.openshift.example.com] diff --git a/playbooks/provisioning/openstack/sample-inventory/openstack.py b/playbooks/provisioning/openstack/sample-inventory/openstack.py new file mode 100755 index 000000000..9d4886261 --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/openstack.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python + +# Copyright (c) 2012, Marco Vito Moscaritolo +# Copyright (c) 2013, Jesse Keating +# Copyright (c) 2015, Hewlett-Packard Development Company, L.P. +# Copyright (c) 2016, Rackspace Australia +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +# The OpenStack Inventory module uses os-client-config for configuration. +# https://github.com/stackforge/os-client-config +# This means it will either: +# - Respect normal OS_* environment variables like other OpenStack tools +# - Read values from a clouds.yaml file. +# If you want to configure via clouds.yaml, you can put the file in: +# - Current directory +# - ~/.config/openstack/clouds.yaml +# - /etc/openstack/clouds.yaml +# - /etc/ansible/openstack.yml +# The clouds.yaml file can contain entries for multiple clouds and multiple +# regions of those clouds. If it does, this inventory module will connect to +# all of them and present them as one contiguous inventory. +# +# See the adjacent openstack.yml file for an example config file +# There are two ansible inventory specific options that can be set in +# the inventory section. +# expand_hostvars controls whether or not the inventory will make extra API +# calls to fill out additional information about each server +# use_hostnames changes the behavior from registering every host with its UUID +# and making a group of its hostname to only doing this if the +# hostname in question has more than one server +# fail_on_errors causes the inventory to fail and return no hosts if one cloud +# has failed (for example, bad credentials or being offline). +# When set to False, the inventory will return hosts from +# whichever other clouds it can contact. 
(Default: True) + +import argparse +import collections +import os +import sys +import time +from distutils.version import StrictVersion + +try: + import json +except: + import simplejson as json + +import os_client_config +import shade +import shade.inventory + +CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml'] + + +def get_groups_from_server(server_vars, namegroup=True): + groups = [] + + region = server_vars['region'] + cloud = server_vars['cloud'] + metadata = server_vars.get('metadata', {}) + + # Create a group for the cloud + groups.append(cloud) + + # Create a group on region + groups.append(region) + + # And one by cloud_region + groups.append("%s_%s" % (cloud, region)) + + # Check if group metadata key in servers' metadata + if 'group' in metadata: + groups.append(metadata['group']) + + for extra_group in metadata.get('groups', '').split(','): + if extra_group: + groups.append(extra_group.strip()) + + groups.append('instance-%s' % server_vars['id']) + if namegroup: + groups.append(server_vars['name']) + + for key in ('flavor', 'image'): + if 'name' in server_vars[key]: + groups.append('%s-%s' % (key, server_vars[key]['name'])) + + for key, value in iter(metadata.items()): + groups.append('meta-%s_%s' % (key, value)) + + az = server_vars.get('az', None) + if az: + # Make groups for az, region_az and cloud_region_az + groups.append(az) + groups.append('%s_%s' % (region, az)) + groups.append('%s_%s_%s' % (cloud, region, az)) + return groups + + +def get_host_groups(inventory, refresh=False): + (cache_file, cache_expiration_time) = get_cache_settings() + if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): + groups = to_json(get_host_groups_from_cloud(inventory)) + open(cache_file, 'w').write(groups) + else: + groups = open(cache_file, 'r').read() + return groups + + +def append_hostvars(hostvars, groups, key, server, namegroup=False): + hostvars[key] = dict( + ansible_ssh_host=server['interface_ip'], + openshift_hostname=server['name'], + openshift_public_hostname=server['name'], + openstack=server) + for group in get_groups_from_server(server, namegroup=namegroup): + groups[group].append(key) + + +def get_host_groups_from_cloud(inventory): + groups = collections.defaultdict(list) + firstpass = collections.defaultdict(list) + hostvars = {} + list_args = {} + if hasattr(inventory, 'extra_config'): + use_hostnames = inventory.extra_config['use_hostnames'] + list_args['expand'] = inventory.extra_config['expand_hostvars'] + if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"): + list_args['fail_on_cloud_config'] = \ + inventory.extra_config['fail_on_errors'] + else: + use_hostnames = False + + for server in inventory.list_hosts(**list_args): + + if 'interface_ip' not in server: + continue + try: + if server["metadata"][os.environ['OS_INV_FILTER_KEY']] == os.environ['OS_INV_FILTER_VALUE']: + firstpass[server['name']].append(server) + except: + firstpass[server['name']].append(server) + for name, servers in firstpass.items(): + if len(servers) == 1 and use_hostnames: + append_hostvars(hostvars, groups, name, servers[0]) + else: + server_ids = set() + # Trap for duplicate results + for server in servers: + server_ids.add(server['id']) + if len(server_ids) == 1 and use_hostnames: + append_hostvars(hostvars, groups, name, servers[0]) + else: + for server in servers: + append_hostvars( + hostvars, groups, server['id'], server, + namegroup=True) + groups['_meta'] = {'hostvars': hostvars} + return groups + + +def is_cache_stale(cache_file, 
cache_expiration_time, refresh=False): + ''' Determines if cache file has expired, or if it is still valid ''' + if refresh: + return True + if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0: + mod_time = os.path.getmtime(cache_file) + current_time = time.time() + if (mod_time + cache_expiration_time) > current_time: + return False + return True + + +def get_cache_settings(): + config = os_client_config.config.OpenStackConfig( + config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES) + # For inventory-wide caching + cache_expiration_time = config.get_cache_expiration_time() + cache_path = config.get_cache_path() + if not os.path.exists(cache_path): + os.makedirs(cache_path) + cache_file = os.path.join(cache_path, 'ansible-inventory.cache') + return (cache_file, cache_expiration_time) + + +def to_json(in_dict): + return json.dumps(in_dict, sort_keys=True, indent=2) + + +def parse_args(): + parser = argparse.ArgumentParser(description='OpenStack Inventory Module') + parser.add_argument('--private', + action='store_true', + help='Use private address for ansible host') + parser.add_argument('--refresh', action='store_true', + help='Refresh cached information') + parser.add_argument('--debug', action='store_true', default=False, + help='Enable debug output') + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--list', action='store_true', + help='List active servers') + group.add_argument('--host', help='List details about the specific host') + + return parser.parse_args() + + +def main(): + args = parse_args() + try: + config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES + shade.simple_logging(debug=args.debug) + inventory_args = dict( + refresh=args.refresh, + config_files=config_files, + private=args.private, + ) + if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): + inventory_args.update(dict( + config_key='ansible', + config_defaults={ + 'use_hostnames': False, + 'expand_hostvars': True, + 'fail_on_errors': True, + } + )) + + inventory = shade.inventory.OpenStackInventory(**inventory_args) + + if args.list: + output = get_host_groups(inventory, refresh=args.refresh) + elif args.host: + output = to_json(inventory.get_host(args.host)) + print(output) + except shade.OpenStackCloudException as e: + sys.stderr.write('%s\n' % e.message) + sys.exit(1) + sys.exit(0) + + +if __name__ == '__main__': + main() -- cgit v1.2.3 From 685516e9f94e011a30b1be17632a44e23927f226 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 14:18:03 +0200 Subject: Add license for openstack.py in inventory It's under the GPLv3+ while the rest of the repo is Apache 2. --- .../provisioning/openstack/INVENTORY-LICENSE.txt | 674 +++++++++++++++++++++ 1 file changed, 674 insertions(+) create mode 100644 playbooks/provisioning/openstack/INVENTORY-LICENSE.txt diff --git a/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt b/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. 
+ + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. 
Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. 
+ + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". 
+ + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. -- cgit v1.2.3 From 269278b56ed03eca8d5e220275ccdb5b7fa1c07d Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 14:21:46 +0200 Subject: Add readme --- playbooks/provisioning/openstack/README.md | 113 +++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 playbooks/provisioning/openstack/README.md diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md new file mode 100644 index 000000000..68550d3a3 --- /dev/null +++ b/playbooks/provisioning/openstack/README.md @@ -0,0 +1,113 @@ +# OpenStack Provisioning + +This repository contains playbooks and Heat templates to provision +OpenStack resources (servers, networking, volumes, security groups, +etc.). The result is an environment ready for openshift-ansible. + + +## Dependencies + +* [Ansible 2.3](https://pypi.python.org/pypi/ansible) +* [shade](https://pypi.python.org/pypi/shade) + + +## What does it do + +* Create Nova servers with floating IP addresses attached +* Assigns Cinder volumes to the servers +* Set up an `openshift` user with sudo privileges +* Optionally attach Red Hat subscriptions +* Set up a bind-based DNS server +* When deploying more than one master, set up a HAproxy server + + +## Set up + +### Copy the sample inventory + + cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory inventory + +### Copy clouds.yaml + + cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/clouds.yaml clouds.yaml + +### Update `inventory/group_vars/all.yml` + +Pay special attention to the values in the first paragraph -- these +will depend on your OpenStack environment. + +The `env_id` and `openstack_dns_domain` will form the DNS domain all +your servers will be under. With the default values, this will be +`openshift.example.com`. + +`openstack_nameservers` is a list of DNS servers accessible from all +the created Nova servers. These will be serve as your DNS forwarders. + +`openstack_ssh_key` is a Nova keypair -- you can see your keypairs with +`openstack keypair list`. + +`openstack_default_image_name` is the name of the Glance image the +servers will use. You can +see your images with `openstack image list`. + +`openstack_default_flavor` is the Nova flavor the servers will use. +You can see your flavors with `openstack flavor list`. + +`openstack_external_network_name` is the name of the Neutron network +providing external connectivity. It is often called `public`, +`external` or `ext-net`. You can see your networks with `openstack +network list`. + +The `openstack_num_masters`, `openstack_num_infra` and +`openstack_num_nodes` values specify the number of Master, Infra and +App nodes to create. 
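Taken together, a minimal sketch of that first block of `inventory/group_vars/all.yml` is shown below; the values simply mirror the sample inventory earlier in this log and are placeholders to replace with your own keypair, image, flavor and external network names:

    env_id: "openshift"
    openstack_dns_domain: "example.com"
    openstack_nameservers: ["192.168.1.1"]
    openstack_ssh_public_key: "openshift"       # an existing Nova keypair
    openstack_default_image_name: "rhel73"      # a Glance image visible to your tenant
    openstack_default_flavor: "m1.medium"       # a Nova flavor
    openstack_external_network_name: "public"   # the Neutron external network
    openstack_num_masters: 1
    openstack_num_infra: 1
    openstack_num_nodes: 2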
+ +### Update the DNS names in `inventory/hosts` + +The different server groups are currently grouped by the domain name, +so if you end up using a different domain than +`openshift.example.com`, you will need to update the `inventory/hosts` +file. + +For example, if your final domain is `my.cloud.com`, you can run this +command to fix update the `hosts` file: + + sed -i 's/openshift.example.com/my.cloud.com/' inventory/hosts + +### Configure the OpenShift parameters + +Finally, you need to update the DNS entry in +`inventory/group_vars/OSEv3.yml` (look at +`openshift_master_default_subdomain`). + +In addition, this is the place where you can customise your OpenShift +installation for example by specifying the authentication. + +The full list of options is available in this sample inventory: + +https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example + + +## Deployment + +### Run the playbook + +Assuming your OpenStack (Keystone) credentials are in the `keystonerc` +file, this is how you stat the provisioning process: + + . keystonerc + ansible-playbook -i inventory --private-key ~/.ssh/openshift openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml + +### Install OpenShift + +Once it succeeds, you can install openshift by running: + + ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml + + +## License + +As the rest of the openshift-ansible-contrib repository, the code here is +licensed under Apache 2. However, the openstack.py file under +`sample-inventory` is GPLv3+. See the INVENTORY-LICENSE.txt file for the full +text of the license. -- cgit v1.2.3 From c3cefa9996fb67b846f44eed78644a0f52d76df1 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 14:39:21 +0200 Subject: Move pre_tasks from to the openstack provisioner We should probably not pollute the role namespace with a name as common as "common". Moving the pre_task.yml to provisioners/openstack instead. --- playbooks/provisioning/openstack/pre_tasks.yml | 39 ++++++++++++++++++++++++++ roles/common/pre_tasks/pre_tasks.yml | 38 ------------------------- 2 files changed, 39 insertions(+), 38 deletions(-) create mode 100644 playbooks/provisioning/openstack/pre_tasks.yml delete mode 100644 roles/common/pre_tasks/pre_tasks.yml diff --git a/playbooks/provisioning/openstack/pre_tasks.yml b/playbooks/provisioning/openstack/pre_tasks.yml new file mode 100644 index 000000000..8446bdfbc --- /dev/null +++ b/playbooks/provisioning/openstack/pre_tasks.yml @@ -0,0 +1,39 @@ +--- +- name: Generate Environment ID + set_fact: + env_random_id: "{{ ansible_date_time.epoch }}" + run_once: true + delegate_to: localhost + +- name: Set default Environment ID + set_fact: + default_env_id: "casl-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" + delegate_to: localhost + +- name: Setting Common Facts + set_fact: + env_id: "{{ env_id | default(default_env_id) }}" + delegate_to: localhost + +- name: Set Dynamic Inventory Filters + become: false + shell: > + export OS_INV_FILTER_KEY=clusterid && OS_INV_FILTER_VALUE={{ env_id }} + delegate_to: localhost + +- name: Updating DNS domain to include env_id (if not empty) + set_fact: + full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' 
+ public_dns_domain) }}" + delegate_to: localhost + +- name: Set the APP domain for OpenShift use + set_fact: + openshift_app_domain: "{{ openshift_app_domain | default('apps') }}" + delegate_to: localhost + +- name: Set the default app domain for routing purposes + set_fact: + openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" + delegate_to: localhost + when: + - openshift_master_default_subdomain is undefined diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml deleted file mode 100644 index c5e79e89c..000000000 --- a/roles/common/pre_tasks/pre_tasks.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- name: Generate Environment ID - set_fact: - env_random_id: "{{ ansible_date_time.epoch }}" - run_once: true - delegate_to: localhost - -- name: Set default Environment ID - set_fact: - default_env_id: "casl-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" - delegate_to: localhost - -- name: Setting Common Facts - set_fact: - env_id: "{{ env_id | default(default_env_id) }}" - delegate_to: localhost - -- name: Set Dynamic Inventory Filters - shell: > - export OS_INV_FILTER_KEY=clusterid && OS_INV_FILTER_VALUE={{ env_id }} - delegate_to: localhost - -- name: Updating DNS domain to include env_id (if not empty) - set_fact: - full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' + public_dns_domain) }}" - delegate_to: localhost - -- name: Set the APP domain for OpenShift use - set_fact: - openshift_app_domain: "{{ openshift_app_domain | default('apps') }}" - delegate_to: localhost - -- name: Set the default app domain for routing purposes - set_fact: - openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" - delegate_to: localhost - when: - - openshift_master_default_subdomain is undefined -- cgit v1.2.3 From 079f58cb9d137fd35e58043f2b53a9b964f3d3d2 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 14:41:29 +0200 Subject: Add default values to provision-openstack.yml --- playbooks/provisioning/openstack/provision-openstack.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index 8125548fd..0505f1bda 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -1,7 +1,7 @@ --- - hosts: localhost pre_tasks: - - include: roles/common/pre_tasks/pre_tasks.yml + - include: pre_tasks.yml roles: - role: openstack-stack stack_name: "{{ env_id }}.{{ public_dns_domain }}" @@ -10,14 +10,14 @@ subnet_prefix: "{{ openstack_subnet_prefix }}" ssh_public_key: "{{ openstack_ssh_public_key }}" openstack_image: "{{ openstack_default_image_name }}" - lb_flavor: "{{ openstack_lb_flavor | default('m1.small') }}" - etcd_flavor: "{{ openstack_default_flavor }}" - master_flavor: "{{ openstack_default_flavor }}" - node_flavor: "{{ openstack_default_flavor }}" - infra_flavor: "{{ openstack_default_flavor }}" - dns_flavor: "{{ openstack_dns_flavor | default('m1.small') }}" + lb_flavor: "{{ openstack_default_flavor | default('m1.small') }}" + etcd_flavor: "{{ openstack_default_flavor | default('m1.small') }}" + master_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" + node_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" + infra_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" + dns_flavor: "{{ 
openstack_default_flavor | default('m1.small') }}" external_network: "{{ openstack_external_network_name }}" - num_etcd: 0 + num_etcd: "{{ openstack_num_etcd | default(0) }}" num_masters: "{{ openstack_num_masters }}" num_nodes: "{{ openstack_num_nodes }}" num_infra: "{{ openstack_num_infra }}" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 0858a645a4ec808d0309b8522f55cef23792fce9 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 14:43:13 +0200 Subject: Fix privileges in the pre-install playbook --- .../openstack/post-provision-openstack.yml | 22 +++++++++++----------- playbooks/provisioning/openstack/pre-install.yml | 1 + 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index d65e075b8..e1faf14eb 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -2,25 +2,30 @@ # Assign hostnames - hosts: cluster_hosts + become: true pre_tasks: - - include: roles/common/pre_tasks/pre_tasks.yml + - include: pre_tasks.yml roles: - role: hostnames # Subscribe DNS Host to allow for configuration below - hosts: dns + become: true roles: - - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager', ansible_sudo: true } + - role: subscription-manager + when: hostvars.localhost.rhsm_register + tags: 'subscription-manager' # Determine which DNS server(s) to use for our generated records - hosts: localhost - roles: + roles: - dns-server-detect # Build the DNS Server Views and Configure DNS Server(s) - hosts: dns + become: true pre_tasks: - - include: roles/common/pre_tasks/pre_tasks.yml + - include: pre_tasks.yml - name: "Generate dns-server views" include: openstack_dns_views.yml roles: @@ -29,20 +34,15 @@ # Build and process DNS Records - hosts: localhost pre_tasks: - - include: roles/common/pre_tasks/pre_tasks.yml + - include: pre_tasks.yml - name: "Generate dns records" include: openstack_dns_records.yml roles: - role: dns -# Use newly configured DNS server for this container ... -- hosts: localhost - tasks: - - name: "Edit /etc/resolv.conf in container" - shell: "sed '0,/.*nameserver.*/s/.*nameserver.*/nameserver {{ public_dns_server }} \\n&/' /etc/resolv.conf > /tmp/resolv.conf && /bin/cp -f /tmp/resolv.conf /etc/resolv.conf" - # OpenShift Pre-Requisites - hosts: OSEv3 + become: true tasks: - name: "Edit /etc/resolv.conf on masters/nodes" lineinfile: diff --git a/playbooks/provisioning/openstack/pre-install.yml b/playbooks/provisioning/openstack/pre-install.yml index 8225287f9..4da007a16 100644 --- a/playbooks/provisioning/openstack/pre-install.yml +++ b/playbooks/provisioning/openstack/pre-install.yml @@ -8,6 +8,7 @@ # - other prep (install additional packages, etc.) # - hosts: OSEv3 + become: true roles: - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager', ansible_sudo: true } - { role: docker, tags: 'docker' } -- cgit v1.2.3 -- cgit v1.2.3 From a7300e6b7ace3098aa05794d4ac2f9e5a4cef64a Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 2 Jun 2017 13:28:00 +0200 Subject: Always let the openshift nodes access the DNS When `node_ingress_cidr` to limit the IP range for the DNS server, this can prevent the actual openshift nodes from accessing it as well. 
This commit makes the access from the `openstack_subnet_prefix` always pass through and uses `node_ingress_cidr` for additional access control. --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 09b62cba7..c10b1d90f 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -289,6 +289,11 @@ resources: port_range_min: 30000 port_range_max: 32767 remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" infra-secgrp: type: OS::Neutron::SecurityGroup @@ -337,11 +342,21 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" - direction: ingress protocol: tcp port_range_min: 53 port_range_max: 53 remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" {% if num_masters is greaterthan 1 %} lb-secgrp: type: OS::Neutron::SecurityGroup -- cgit v1.2.3 From 4bb2f005bc6cdeb8e656c2b42ac54db8fbd67fb9 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 5 Jun 2017 16:41:09 +0200 Subject: Add a flat sec group for openstack provider Add a openstack_flat_secgroup, defaults to False. When set, merges sec rules for master, node, etcd, infra nodes into a single group. Less secure, but might help to mitigate quota limitations. Update docs. Use timeout 30s to mitigate the error: Timeout (12s) waiting for privilege escalation prompt. Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 11 +- playbooks/provisioning/openstack/pre_tasks.yml | 2 +- .../openstack/sample-inventory/group_vars/all.yml | 3 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 138 +++++++++++++++++++-- 4 files changed, 140 insertions(+), 14 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 68550d3a3..35f37db0d 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -25,7 +25,7 @@ etc.). The result is an environment ready for openshift-ansible. ### Copy the sample inventory - cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory inventory + cp -r openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory inventory ### Copy clouds.yaml @@ -62,6 +62,11 @@ The `openstack_num_masters`, `openstack_num_infra` and `openstack_num_nodes` values specify the number of Master, Infra and App nodes to create. +The `openstack_flat_secgroup`, controls Neutron security groups creation for Heat +stacks. Set it to true, if you experience issues with sec group rules +quotas. It trades security for number of rules, by sharing the same set +of firewall rules for master, node, etcd and infra nodes. + ### Update the DNS names in `inventory/hosts` The different server groups are currently grouped by the domain name, @@ -96,13 +101,13 @@ Assuming your OpenStack (Keystone) credentials are in the `keystonerc` file, this is how you stat the provisioning process: . 
keystonerc - ansible-playbook -i inventory --private-key ~/.ssh/openshift openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml + ansible-playbook -i inventory --timeout 30 --private-key ~/.ssh/openshift openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml ### Install OpenShift Once it succeeds, you can install openshift by running: - ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml + ansible-playbook --timeout 30 --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml ## License diff --git a/playbooks/provisioning/openstack/pre_tasks.yml b/playbooks/provisioning/openstack/pre_tasks.yml index 8446bdfbc..a4ff7c4ac 100644 --- a/playbooks/provisioning/openstack/pre_tasks.yml +++ b/playbooks/provisioning/openstack/pre_tasks.yml @@ -18,7 +18,7 @@ - name: Set Dynamic Inventory Filters become: false shell: > - export OS_INV_FILTER_KEY=clusterid && OS_INV_FILTER_VALUE={{ env_id }} + export OS_INV_FILTER_KEY=clusterid && export OS_INV_FILTER_VALUE={{ env_id }} delegate_to: localhost - name: Updating DNS domain to include env_id (if not empty) diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 50aaa573d..3eb0f9f80 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -37,3 +37,6 @@ openstack_subnet_prefix: "192.168.99" # NOTE(shadower): Do not change this value. The Ansible user is currently # hardcoded to `openshift`. ansible_user: openshift + +# Use a single security group for a cluster +openstack_flat_secgroup: false diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c10b1d90f..c750865a5 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -142,6 +142,119 @@ resources: # cluster_id: {{ stack_name }} # public_key: {{ ssh_public_key }} +{% if openstack_flat_secgrp|bool %} + flat-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-flat-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8444 + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5405 + - direction: ingress + 
protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2380 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 +{% else %} master-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -168,10 +281,6 @@ resources: - direction: ingress protocol: tcp port_range_min: 8443 - port_range_max: 8443 - - direction: ingress - protocol: tcp - port_range_min: 8444 port_range_max: 8444 - direction: ingress protocol: tcp @@ -204,10 +313,6 @@ resources: - direction: ingress protocol: udp port_range_min: 5404 - port_range_max: 5404 - - direction: ingress - protocol: udp - port_range_min: 5405 port_range_max: 5405 - direction: ingress protocol: tcp @@ -317,6 +422,7 @@ resources: protocol: tcp port_range_min: 443 port_range_max: 443 +{% endif %} dns-secgrp: type: OS::Neutron::SecurityGroup @@ -411,7 +517,7 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: - - { get_resource: etcd-secgrp } + - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } floating_network: {{ external_network }} net_name: str_replace: @@ -491,10 +597,14 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: +{% if openstack_flat_secgrp|bool %} + - { get_resource: flat-secgrp } +{% else %} - { get_resource: master-secgrp } - { get_resource: node-secgrp } {% if num_etcd is equalto 0 %} - { get_resource: etcd-secgrp } +{% endif %} {% endif %} floating_network: {{ external_network }} net_name: @@ -538,7 +648,7 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: - - { get_resource: node-secgrp } + - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } floating_network: {{ external_network }} net_name: str_replace: @@ -581,8 +691,12 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: +{% if openstack_flat_secgrp|bool %} + - { get_resource: flat-secgrp } +{% else %} - { get_resource: node-secgrp } - { get_resource: infra-secgrp } +{% endif %} floating_network: {{ external_network }} net_name: str_replace: @@ -621,7 +735,11 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: +{% if openstack_flat_secgrp|bool %} + - { get_resource: flat-secgrp } +{% else %} - { get_resource: node-secgrp } +{% endif %} - { get_resource: dns-secgrp } floating_network: {{ external_network }} net_name: -- cgit v1.2.3 From 
a8719af95559926bcf4841197273dfe838a563a4 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 5 Jun 2017 22:06:48 +0200 Subject: Add ansible.cfg for openstack provider Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 4 ++++ .../openstack/sample-inventory/ansible.cfg | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 playbooks/provisioning/openstack/sample-inventory/ansible.cfg diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 35f37db0d..fb2053c25 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -31,6 +31,10 @@ etc.). The result is an environment ready for openshift-ansible. cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/clouds.yaml clouds.yaml +### Copy ansible config + + cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/ansible.cfg ansible.cfg + ### Update `inventory/group_vars/all.yml` Pay special attention to the values in the first paragraph -- these diff --git a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg new file mode 100644 index 000000000..a701e59ac --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg @@ -0,0 +1,19 @@ +# config file for ansible -- http://ansible.com/ +# ============================================== +[defaults] +forks = 50 +# work around privilege escalation timeouts in ansible +timeout = 30 +host_key_checking = false +inventory = inventory +inventory_ignore_extensions = secrets.py, .pyc +gathering = smart +retry_files_enabled = false +fact_caching = jsonfile +fact_caching_connection = .ansible/cached_facts +fact_caching_timeout = 900 + +[ssh_connection] +ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no +control_path = /var/tmp/%%h-%%r +pipelining = True -- cgit v1.2.3 From b884e6a9c77ae2d86b2de3c4ae6e8de558444610 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 12 Jun 2017 12:02:41 +0200 Subject: Drop atomic-openshift-utils, update docs for origin TODO use with when: ansible_distribution == 'CentOS' Also update docs for origin Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 11 ++++++++++- roles/openshift-prep/tasks/prerequisites.yml | 3 +-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index fb2053c25..c319791c9 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -96,6 +96,12 @@ The full list of options is available in this sample inventory: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example +Note, that in order to deploy OpenShift origin, you should update the following +variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`: + + deployment_type: origin + origin_release: 1.5.1 + openshift_deployment_type: "{{ deployment_type }}" ## Deployment @@ -111,8 +117,11 @@ file, this is how you stat the provisioning process: Once it succeeds, you can install openshift by running: - ansible-playbook --timeout 30 --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml + ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ 
openshift-ansible/playbooks/byo/openshift-node/network_manager.yml + ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml +Note, the `network_manager.yml` is only required if you're deploying OpenShift +origin. ## License diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml index 1286905f4..60507636f 100644 --- a/roles/openshift-prep/tasks/prerequisites.yml +++ b/roles/openshift-prep/tasks/prerequisites.yml @@ -1,7 +1,7 @@ --- - name: "Cleaning yum repositories" command: "yum clean all" - + - name: "Install required packages" yum: name: "{{ item }}" @@ -13,7 +13,6 @@ - bind-utils - bridge-utils - bash-completion - - atomic-openshift-utils - vim-enhanced - name: "Update all packages (this can take a very long time)" -- cgit v1.2.3 From c12c972e8c1180cebf24ddd0eb43b26657fdaec6 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 14 Jun 2017 09:33:18 +0200 Subject: Gather facts for provision playbook Provision tasks use facts like ansible_hostname and few others. W/o gathering facts, those expire, and the provision playbook cannot be reapplied in order to update the existing heat stack. Refresh the facts cache by specifying gather_facts: true. Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/provision-openstack.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index 0505f1bda..c7ad782c9 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -1,5 +1,6 @@ --- - hosts: localhost + gather_facts: True pre_tasks: - include: pre_tasks.yml roles: -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From ca93151d4dee1f907cf578e3ab2b565f288c37c8 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 16 Jun 2017 17:16:06 +0200 Subject: Update sample inventory with the latest changes --- playbooks/provisioning/openstack/README.md | 3 +- .../sample-inventory/group_vars/OSEv3.yml | 2 +- .../openstack/sample-inventory/group_vars/all.yml | 42 ++++++++++++++-------- 3 files changed, 31 insertions(+), 16 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index c319791c9..423d57113 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -9,6 +9,7 @@ etc.). The result is an environment ready for openshift-ansible. * [Ansible 2.3](https://pypi.python.org/pypi/ansible) * [shade](https://pypi.python.org/pypi/shade) +* python-dns ## What does it do @@ -66,7 +67,7 @@ The `openstack_num_masters`, `openstack_num_infra` and `openstack_num_nodes` values specify the number of Master, Infra and App nodes to create. -The `openstack_flat_secgroup`, controls Neutron security groups creation for Heat +The `openstack_flat_secgrp`, controls Neutron security groups creation for Heat stacks. Set it to true, if you experience issues with sec group rules quotas. It trades security for number of rules, by sharing the same set of firewall rules for master, node, etcd and infra nodes. 
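A minimal sketch of how this toggle might look in `inventory/group_vars/all.yml`, using the variable name from the sample inventory (enabling it is only suggested when Neutron security-group rule quotas are a problem):

```
# inventory/group_vars/all.yml (sketch)
# Share one Neutron security group across master, node, etcd and infra
# hosts instead of the per-role groups; fewer rules, but less granular.
openstack_flat_secgrp: true
```

Leaving it at the default `false` keeps the separate per-role security groups rendered by `heat_stack.yaml.j2`.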
diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index d850f88a4..32ec43387 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -1,7 +1,7 @@ --- openshift_deployment_type: openshift-enterprise openshift_release: v3.5 -openshift_master_default_subdomain: "apps.openshift.example.com" +openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" # NOTE(shadower): do not remove this line, otherwise the default node labels # won't be set up. diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 3eb0f9f80..31e0a61ed 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -1,6 +1,7 @@ env_id: "openshift" -openstack_dns_domain: "example.com" -openstack_nameservers: ["192.168.1.1"] +public_dns_domain: "example.com" +public_dns_nameservers: [] + openstack_ssh_public_key: "openshift" openstack_default_image_name: "rhel73" openstack_default_flavor: "m1.medium" @@ -12,26 +13,39 @@ openstack_num_nodes: 2 docker_volume_size: "15" -# TODO(shadower): this is identical to `openstack_dns_domain`. -# We should make it so it's not duplicated here. -dns_domain: "example.com" - -# TODO(shadower): this is identical to `openstack_nameservers`. -# We should make it so it's not duplicated here. -public_dns_forwarder: "192.168.1.1" - openstack_subnet_prefix: "192.168.99" # # Red Hat subscription +# # Using Red Hat Satellite: # rhsm_register: True +# rhsm_satellite: 'sat-6.example.com' +# rhsm_org: 'OPENSHIFT_ORG' +# rhsm_activationkey: '' + +# # Or using RHN username, password and optionally pool: +# rhsm_register: True +# rhsm_username: '' +# rhsm_password: '' +# rhsm_pool: '' + # rhsm_repos: # - "rhel-7-server-rpms" # - "rhel-7-server-ose-3.5-rpms" # - "rhel-7-server-extras-rpms" # - "rhel-7-fast-datapath-rpms" -# rhsm_username: '' -# rhsm_password: '' -# rhsm_pool: '' + + +# # Roll-your-own DNS +# openstack_num_dns: 0 +# external_nsupdate_keys: +# public: +# key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg==' +# key_algorithm: 'hmac-md5' +# server: '192.168.1.1' +# private: +# key_secret: 'kVE2bVTgZjrdJipxPhID8BEZmbHD8cExlVPR+zbFpW6la8kL5wpXiwOh8q5AAosXQI5t95UXwq3Inx8QT58duw==' +# key_algorithm: 'hmac-md5' +# server: '192.168.1.2' # NOTE(shadower): Do not change this value. 
The Ansible user is currently @@ -39,4 +53,4 @@ openstack_subnet_prefix: "192.168.99" ansible_user: openshift # Use a single security group for a cluster -openstack_flat_secgroup: false +openstack_flat_secgrp: false -- cgit v1.2.3 -- cgit v1.2.3 From bf7e5e82872684088995cc55559f8e51fe35d4a9 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 16 Jun 2017 17:52:37 +0200 Subject: Fix yamllint errors --- .../openstack/openstack_dns_records.yml | 6 +-- .../provisioning/openstack/openstack_dns_views.yml | 6 +-- .../openstack/post-provision-openstack.yml | 2 - playbooks/provisioning/openstack/pre-install.yml | 2 - .../openstack/sample-inventory/clouds.yaml | 2 +- .../openstack/sample-inventory/group_vars/all.yml | 1 + roles/dns-server-detect/tasks/main.yml | 2 - roles/hostnames/tasks/main.yaml | 4 +- roles/hostnames/test/test.yaml | 17 ------- roles/hostnames/vars/records.yaml | 54 +++++++++++----------- roles/openstack-stack/test/stack-create-test.yml | 1 - roles/subscription-manager/pre_tasks/pre_tasks.yml | 4 +- roles/subscription-manager/tasks/main.yml | 2 +- 13 files changed, 38 insertions(+), 65 deletions(-) diff --git a/playbooks/provisioning/openstack/openstack_dns_records.yml b/playbooks/provisioning/openstack/openstack_dns_records.yml index b1008fe33..b32b70ba9 100644 --- a/playbooks/provisioning/openstack/openstack_dns_records.yml +++ b/playbooks/provisioning/openstack/openstack_dns_records.yml @@ -1,5 +1,4 @@ --- - - name: "Generate list of private A records" set_fact: private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['openstack']['private_v4'] } ] }}" @@ -42,7 +41,7 @@ set_fact: public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['openstack']['public_v4'] } ] }}" with_items: "{{ groups['infra_hosts'] }}" - + - name: "Set the public DNS server details to use the external value (if provided)" set_fact: nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" @@ -72,6 +71,5 @@ entries: "{{ public_records }}" - name: "Generate the final dns_records_add" - set_fact: + set_fact: dns_records_add: "{{ private_named_records + public_named_records }}" - diff --git a/playbooks/provisioning/openstack/openstack_dns_views.yml b/playbooks/provisioning/openstack/openstack_dns_views.yml index 611ed9f82..ea0a7cb96 100644 --- a/playbooks/provisioning/openstack/openstack_dns_views.yml +++ b/playbooks/provisioning/openstack/openstack_dns_views.yml @@ -1,8 +1,7 @@ --- - - name: "Generate ACL list for DNS server" set_fact: - acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['openstack']['private_v4'] + '/32') ] }}" + acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['openstack']['private_v4'] + '/32') ] }}" with_items: "{{ groups['cluster_hosts'] }}" - name: "Generate the private view" @@ -22,6 +21,5 @@ forwarder: "{{ public_dns_nameservers }}" - name: "Generate the final named_config_views" - set_fact: + set_fact: named_config_views: "{{ private_named_view + public_named_view }}" - diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index e1faf14eb..4e42c1c7f 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -1,5 +1,4 @@ --- - # Assign hostnames - hosts: cluster_hosts become: true @@ -57,4 +56,3 @@ dest: /etc/sysconfig/network regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" - diff --git a/playbooks/provisioning/openstack/pre-install.yml b/playbooks/provisioning/openstack/pre-install.yml index 4da007a16..629182d49 100644 --- a/playbooks/provisioning/openstack/pre-install.yml +++ b/playbooks/provisioning/openstack/pre-install.yml @@ -1,5 +1,4 @@ --- - ############################### # OpenShift Pre-Requisites @@ -13,4 +12,3 @@ - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager', ansible_sudo: true } - { role: docker, tags: 'docker' } - { role: openshift-prep, tags: 'openshift-prep' } - diff --git a/playbooks/provisioning/openstack/sample-inventory/clouds.yaml b/playbooks/provisioning/openstack/sample-inventory/clouds.yaml index c266426c6..8182d2995 100644 --- a/playbooks/provisioning/openstack/sample-inventory/clouds.yaml +++ b/playbooks/provisioning/openstack/sample-inventory/clouds.yaml @@ -1,5 +1,5 @@ +--- ansible: use_hostnames: True expand_hostvars: True fail_on_errors: True - diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 31e0a61ed..047923253 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -1,3 +1,4 @@ +--- env_id: "openshift" public_dns_domain: "example.com" public_dns_nameservers: [] diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml index e8dd0acf0..183c0a0ca 100644 --- a/roles/dns-server-detect/tasks/main.yml +++ b/roles/dns-server-detect/tasks/main.yml @@ -1,5 +1,4 @@ 
--- - - fail: msg: 'Missing required private DNS server(s)' when: @@ -35,4 +34,3 @@ public_dns_server: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" when: - public_dns_server is undefined - diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml index bf142d653..c49852210 100644 --- a/roles/hostnames/tasks/main.yaml +++ b/roles/hostnames/tasks/main.yaml @@ -15,8 +15,8 @@ register: cloud_cfg - name: Prevent cloud-init updates of hostname/fqdn (if applicable) - lineinfile: - dest: /etc/cloud/cloud.cfg + lineinfile: + dest: /etc/cloud/cloud.cfg state: present regexp: "{{ item.regexp }}" line: "{{ item.line }}" diff --git a/roles/hostnames/test/test.yaml b/roles/hostnames/test/test.yaml index 34bf37942..0c56aea51 100644 --- a/roles/hostnames/test/test.yaml +++ b/roles/hostnames/test/test.yaml @@ -2,20 +2,3 @@ - hosts: all roles: - role: hostnames - -# - debug: -# -# - hosts: dns -# roles: -# - role: dns-server -# named_config_views: -# - name: private -# acl_entry: -# - 192.168.124.40/32 -# - 192.168.124.40/32 -# zone: -# - dns_domain: example.com -# - name: public -# zone: -# - dns_domain: example.com -# - role: dns diff --git a/roles/hostnames/vars/records.yaml b/roles/hostnames/vars/records.yaml index 3bf12ae2b..0cadc8181 100644 --- a/roles/hostnames/vars/records.yaml +++ b/roles/hostnames/vars/records.yaml @@ -1,28 +1,28 @@ --- - - name: "Building Records" - set_fact: - dns_records_add: - - view: private - zone: example.com - entries: - - type: A - hostname: master1.example.com - ip: 172.16.15.94 - - type: A - hostname: node1.example.com - ip: 172.16.15.86 - - type: A - hostname: node2.example.com - ip: 172.16.15.87 - - view: public - zone: example.com - entries: - - type: A - hostname: master1.example.com - ip: 10.3.10.116 - - type: A - hostname: node1.example.com - ip: 10.3.11.46 - - type: A - hostname: node2.example.com - ip: 10.3.12.6 +- name: "Building Records" + set_fact: + dns_records_add: + - view: private + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 172.16.15.94 + - type: A + hostname: node1.example.com + ip: 172.16.15.86 + - type: A + hostname: node2.example.com + ip: 172.16.15.87 + - view: public + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 10.3.10.116 + - type: A + hostname: node1.example.com + ip: 10.3.11.46 + - type: A + hostname: node2.example.com + ip: 10.3.12.6 diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml index 6cbd7ff30..0fbf66f34 100644 --- a/roles/openstack-stack/test/stack-create-test.yml +++ b/roles/openstack-stack/test/stack-create-test.yml @@ -14,4 +14,3 @@ infra_flavor: "{{ openstack_default_flavor }}" dns_flavor: "{{ openstack_default_flavor }}" external_network: "{{ openstack_external_network_name }}" - diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml index b21356cf2..464670fc0 100644 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -11,7 +11,7 @@ - name: "Determine if Subscription Manager should be used" set_fact: rhsm_register: false - when: + when: - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' - rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '' - rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '' @@ -21,7 +21,7 @@ - name: "Validate Subscription Manager 
organization is set" fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" - when: + when: - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' - rhsm_satellite is defined - rhsm_satellite is not none diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 2dd14b48e..8c1ae697a 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -4,7 +4,7 @@ rhsm_password: "{{ hostvars.localhost.rhsm_password }}" when: - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' - + - name: "Initializing Subscription Manager authentication method" set_fact: rhsm_authentication: false -- cgit v1.2.3 -- cgit v1.2.3 From 9369c9dfd722e697f83a225d78c2c1dcd1247976 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 16 Jun 2017 18:08:00 +0200 Subject: Fix flake8 errors with the openstack inventory --- playbooks/provisioning/openstack/sample-inventory/openstack.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/playbooks/provisioning/openstack/sample-inventory/openstack.py b/playbooks/provisioning/openstack/sample-inventory/openstack.py index 9d4886261..8de73e1e0 100755 --- a/playbooks/provisioning/openstack/sample-inventory/openstack.py +++ b/playbooks/provisioning/openstack/sample-inventory/openstack.py @@ -54,7 +54,7 @@ from distutils.version import StrictVersion try: import json -except: +except ImportError: import simplejson as json import os_client_config @@ -147,10 +147,10 @@ def get_host_groups_from_cloud(inventory): if 'interface_ip' not in server: continue try: - if server["metadata"][os.environ['OS_INV_FILTER_KEY']] == os.environ['OS_INV_FILTER_VALUE']: + if server["metadata"][os.environ['OS_INV_FILTER_KEY']] == os.environ['OS_INV_FILTER_VALUE']: + firstpass[server['name']].append(server) + except Exception: firstpass[server['name']].append(server) - except: - firstpass[server['name']].append(server) for name, servers in firstpass.items(): if len(servers) == 1 and use_hostnames: append_hostvars(hostvars, groups, name, servers[0]) @@ -243,7 +243,7 @@ def main(): output = to_json(inventory.get_host(args.host)) print(output) except shade.OpenStackCloudException as e: - sys.stderr.write('%s\n' % e.message) + sys.stderr.write('%s\n' % str(e)) sys.exit(1) sys.exit(0) -- cgit v1.2.3 From a6bf0552961c7b8c63639850fd0501941e89938f Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 16 Jun 2017 18:59:45 +0200 Subject: Add an Openstack provider (#397) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * First cut at the rhc-ose-ansible structure * New OSE3 docker host builder and OpenStack ansible provisioning support * Support for supplying flavor name and moved around variables * Refactored OpenStack provisioning to be a generic role. 
Created OpenShift specific playbook * Registry Role for ansible playbooks * Added immediate=yes to have firwalld port take affect; restructured registry role; changed true to yes in module parameters * added post_install role * adding playbook * Migration of CICD server provisioning to Ansible * Adding nginx auth layer * Removing key name from registry * Refactoring and renaming * adding openshift-ansible's post install roles * removing deprecated files * Shell for role variable info * removing extra files * Add OpenStack SSH key parameter check * Replacing yum commands and normalizing comments * fixed README * Renaming template files with .j2 for clarity * Add OpenStack security group detection and creation resolves #106 * Change to using split to iterate and SSH rule create only once * Reorder instances names to sort by env_id * Change default_env_id of "testenv" to local env OS_USERNAME resolves #142 * Prepend 'casl' to default_env_id * Add connection test to OpenStack before proceeding * First cut at DNS ansible roles * Updated defaults and tasks for dns-server * Add subscription-manager support for Hosted or Satellite * Refactor role to dynamically determine rhsm_method * Removes rhsm_method * Renames rhsm_server to rhsm_satellite * Add additional pre_task checks (hosted + key) * Change conditionals from rhsm_method check to rhsm_satellite defined * Change repos disable/enable from key to if repos are defined * Update README and examples in inventory file * Fix bad syntax with extra 'and' in when using rhsm_pool * Refactor use of rhsm_password to prevent display to CLI * Cosmetic changes to task names and move yum clean all to prereqs * Remove vars_prompt, add info to README to re-enable and for ansible-vault * Add openstack pre_tasks and ansible_sudo when calling role * Add deprovision playbook using nova list with sanity checks - Add minimum length check for env_id - Add max_instances check - Remove dynamic openstack.py inventory - Add override to bypass checks * Refactor debug flag to be dry_run and other small changes - Removed debug statements and instead display on pause prompt - Moved to playbooks directory * Add ansible_sudo: true to subscription-manager task * This matches PR#133 enabling ansible_sudo: true when calling that role * Also changes max_instances check from >= to just > to allow 2 full default environments to be removed (6 max_instances) * Updated to fix broken/missing 'defaults'... 
* Add unique image logic and rename playbook to terminate.yml * Add OSE provision prerequisites - Install required packages - Update pacakges (moved from main.yml) - Install and disable firewalld - Install iptables-services and disable iptables - Verify and set hostname if needed * Add SELinux check and fail if not enforcing * Remove getenforce and firewall tasks and use facts - Uses Ansible collected facts to determine SELinux status - Adds ansible_sudo: true when calling role - Adds tag to role when calling it * Add docker role - Largely taken from cicd docker.yml - Changed to using a template for docker-storage-setup - Using variables for both DEV and VG defined in defaults - Using pvs command to check for use of DEV and VG before proceeding * Add org parameter to Satellite with user/pass * Fix typo in task name * Updated dns-server role based on feedback * Changes by JayKayy for a full provision of OpenShift on OpenStack * Role for disconnected git server * Added additional yum dependency and corrected spelling * Added example of disconnected git inventory file * Changes to allow runs from inside a container. Also allows for running upstream openshift-ansible installer * Reverting previous commit and making template adjustments * Subscription manager role should accomodate orgs with spaces * Fixing unescaped newline * Channging hard coded host groups to match openshift-ansible expected host groups. Importing byo playbook now instead of nested ansible run. Need to refactor how we generate hostnames to make it fit this. * Updated to run as root rather than cloud-user, for now... * Updated inventory template to include openshift_hostname and openshift_public_hostname * Wrapping in a script to tie the two playbooks together * Updating ose-provision with DNS workarounds / fixes * Removed spaces causing issues... * DNS fix to support OSEv3.2 * Add floating IP support when using Neutron * Updated to remove repos from playbook + fix typo * Cleande up hostname role to make it more generic * Image name for DNS server becomes configurable. * Updated inventory and template file to make cluster config optional * Removing temporary file * Loosen up the DNS server a bit to allow for ETL OSP installs * Re-implements original subscription-manager role invokation that was removed in PR# 168. 
* Enhanced provisioning script with better error checking, diretory awareness, and improved help output * Should be looking for generated inventory file in SCRIPTS_BASE_DIR * Add Neutron floating IP support for Issue #195 * Add check for and set_fact if Neutron is in use which is used by several tasks * This PR was originally longer and contained the now split off PR #197 * first attempt at securing the registry * Minor updates for ansible 2.1 compatibility * Updated CICD implementation to support ETL OSP env * Updated OSE inventory file with some clean-up * Add enhancements for for terminate playbook * Fixes Issue #206 * Add check for valid item when attempting to delete objects * Add debug on all variables when using dry_run * Changed default ansible_ssh_user to cloud-user in line with standard cloud guest image * Add count for ips and volumes to display since these may not always be the same as instance count * Enhance displayed warning/note message to include new counts * It is possible for an instance to not have a floating IP for whatever reason (such as manually deallocating or releasing the IP), in this case SSH will not work to the instance so it will not be included in the host group to attempt subscription manager unregister, but will still be deleted * It is possible that an instance will have a volume created but not attached. In this case as a precautionary measure I am excluding these unattached volumes from the deletion in case this was intentionally detached to preserve data. We can further discuss if this should be a parameter to override instead or if we need to change this behavior. * Excluded instances in ERROR state as they will most likely not delete. We can discuss if this should be parameterized instead. * Added prompt variable defaulted to true but can be set to false * Added unregister variable defaulted to true but can be set to false * Adding NFS support and fixing template labels so we get a router and registry out of the box. * testing changes * tested changes * fixing defaults and removing host from test playbook * adding clenaup test book and fixed typo * Allow passing of ansible extra-vars in provisioning script * Change --environment to --extra-vars and add usage. * added check for already secured registry and uses actualy openshift_common dependency * fixed readiness probe by adding logic for 3.1 vs 3.2 * Fix malformed file to address Issue #210 * Pulling out file paths into variables to account for containerized installs * fixed error message logic for already secured registry * added tasks to disable and re-enable deployment triggers, remove debug task * Fixes Issue #163 if rhsm_password is not defined * Adding a post-install playbook with secure-registry and ssh key sync. 
* Node storage now uses node specific storage var; search for generated inventory file sorts by timestamp not name * Initial commit exposing registry service * move registry_hostname to inventory * Updated env_id to be a sub-domain + make the logic a bit more flexible * Enabled default subdomain/'apps' * Updated inventory template file to include 'openshift_deployment_type' * Adding LDAP and HTPasswd examples for an auth provider to base inventory file * Fixing port number in LDAP example * Refactor OpenStack security group creation * Adds new openstack-security-groups role * Addresses Issue #211 and adds all instances to default group * Defines default security group variable with all groups/rules * Sets security group variables per type (master,node,nfs,dns) * Supports specifying no security group for a type (e.g. nfs) * Uses new Ansible 2.x modules * Refactor to playbook and split data structure out * Split single security group variable into one per type * Moves 'default' security group from role into variable * Moves default security group variables back to openshift-common role * Converts openstack-security-group role into playbook * Playbook called on every openstack-create invocation as before * Simplifies security group tasks and removes type bhecking * Iterate through seucrity groups and build a comma-separated list of groups * Add detection of non-Neutron env * Add UDP 8053 to default master security group * Adjusting docker role, adding support for logging/metrics, and updating client container * OpenShift Management Role * Fixing ansible impl to work with OSP9 and ansible 2.2 * Correcting formatting * Added process / contribution info * Updated default security group rules (#7) * Openstack heat (#2) * Adding a role to invoke openstack heat * Adding readme * Pulling parameters out to inventory file * start of end-to-end playbook * More enhancements and refactoring to make dynamic inventory the driver for an openshift install * Switching to variable substituted path to config.yaml playbook * Changes to allow defining of number of nodes/infranodes. 
* Added labels to inventory * Start of end-to-end functionality * Enhancements to support openstack heat provisioning * Updating inventory sample to remove some deprecation warnings * Working towards making the secure-registry role 'become' aware * Fixing node labels and removing secure-registry as it's no longer needed * No longer need insecure registry line, as installer will secure our registry * Adjusted dynamic inventory to filter by clusterid * Minor updates to dynamic inventory bug * Adding a refactored sample inventory directory * Refactoring playbooks for better directory structure, and to narrow down host groups * Adding volume mounts to heat template * Moving dns playbooks back to original location * Fixing incorrect file path * Cleaning up inventory samples * One more hostname to clean up * Changing var name * changed openshift-provision to openshift-prep * Adjusting current provision script to avoid breakage by new openstack-heat code * Updating PR Template with Team mention (#10) * Install playbook defaults to the assumption that casl-ansible and openshift-ansible are checked out to the same directory * Removing unnecessary task * Fixing two significant bugs in the HEAT deployment (#13) * Updated values in sample inventory (#17) * Adding documentation and docker containers so others can begin testin… (#16) * Adding documentation and docker containers so others can begin testing cluster provisioning * Making updates per comments by @oybed * Fixing formatting changes for links * Renaming openstack images to align with CoP naming (#18) * Defaulting the DNS instance to a small flavor (#20) * Nagios (#11) * First cut at the nagios work * Added NRPE service enabled * Updated implementation to be a bit more flexible * Updated logic to include checks for services * Added support for DNS and NFS checks * Updated templates and config files * Updated check_service script to simplify and avoid false negatives * Added support for OpenShift checks * Added README for the playbook * Updated README * DNS server should NOT run docker (#25) * Readme (#26) * Updated documentation and example inventory * Update README.md Added "hint" * Update README.md Fix numbering in the markdown * Update README.md * Added docker_volume_size to the sample inventory * Added rhsm_pool to the sample inventory * Updated README per comments * Ensure DNS configuration has wildcards set for infra nodes (#24) * Ensure DNS configuration has wildcards set for infra nodes * Updated to include all cluster hosts for DNS entries * Updated DNS server role + example playbook (#27) * Updated DNS server role + example playbook * Updated DNS server role + example playbook * Dns selinux (#28) * Updated DNS server role + example playbook * Updated DNS server role + example playbook * Updated for SELinux boolean * Openshift mgmt (#30) Added prune_projects to the openshift-management role along with Ansible tower support * Created initial CHANGELOG.md * Updating to development release of ansible 2.3.0 to pull down bug fixes in HEAT module (#21) * Workaround for Ansible 2.3 breakage (#31) * Added quotes where needed and fixed some other minor bugs (#33) * Fixing awk check (#34) * Updating client image to lock it to ansible 2.3 and install some addi… (#32) * Updating client image to lock it to ansible 2.3 and install some additional dependencies * First attempt at a docker-compose based solution * Renaming image * Stack refactor (#38) * Refactored openstack-stack role to: - Convert static heat template files to ansible templates - 
Include native ansible groups via openstack metadata. This removes the need for a playbook to map host groups - Some code cleanup * Deleting commentd out code and irrelevant plays * Refactored openstack-stack role to: - Convert static heat template files to ansible templates - Include native ansible groups via openstack metadata. This removes the need for a playbook to map host groups - Some code cleanup * Deleting commentd out code and irrelevant plays * Replacing stack parameters with jinja expressions * Updating sample inventory to work with latest dynamic inventory changes * updating inventory with host group mapping. making sync keys optional * Missing cluster_hosts group * Updating to add infra_hosts * Updating inventory per comments from oybed and sabre1041 * First attempt at a simple multi-master support (#39) * First attempt at a simple multi-master support * Removing unneeded inventory * adding default number of masters and lower number of nodes * Some fixes (#41) * Fix the sample inventory The `openstack_nameservers` variable needs to be a list of strings, we need to set the Openshift labels in OSv3.yml and we show an example of using the username/password/poll for RHEL subscriptions. * Update the READMEs This fixes some of the paths, explains that we need to pass `openstack_ssh_public_key` to the end-to-end playbook and includes the full Docker command since there is no `run.sh` script. Oh and Heat is not an acronym :). * Fixes to the readme and inventory * Use docker-compose * Correcting the sample inventory for an HA cluster (#40) * Correcting the sample inventory for an HA cluster * Adding node label mapping * Updating to mre generic IPs * Updating to OSP ocata repo, as there are some bugs with newton's channel (#44) * Use the correct variable name in create_users (#43) The user creation was failing, because it was looking for the `demo_users` variable while the samples put the data under `create_users`. * Upgrading jinja2 to work correctly with latest templates (#45) * Fix rpm deps (#46) * Upgrading jinja2 to work correctly with latest templates * Updated to solve rpm deps + other version issues * Clean-up * Updating control-host settings and env * Updating control-host settings and env * Updating README and names to align across all components * Setting the TERM var for better shell experience * Conditionally set the openshift_master_default_subdomain to avoid overriding it unecessary (#47) * Update README.md * Update CASL to use nsupdate for DNS records (#48) * Updated to use nsupdate for DNS records * Updated formatting of dict * Updating descriptive text * Support for external DNS config * Upgrading jinja2 to work correctly with latest templates * Latest update for nsupdate * Updated to use nsupdate for DNS records * Updated formatting of dict * Updating descriptive text * Support for external DNS config * Latest update for nsupdate * Updated to support external public/private DNS server(s) * Updated DNS server handling * Updated DNS server handling * Updated DNS server handling * Eliminated the from the sample inventories * Updated sample inventory to point to 2 separate DNS servers for private/public * Playbook clean-up * Adding 'python-dns' * splitting subscription manager calls to allow for a clean pre-install playbook * Move the openstack provisioning playbooks They'll live in playbooks/provisioning/openstack from now on. 
* Add a single provisioning playbook * Symlink roles to provisioning/openstack/roles * Add a sample inventory for openstack provisioning * Add license for openstack.py in inventory It's under the GPLv3+ while the rest of the repo is Apache 2. * Add readme * Move pre_tasks from to the openstack provisioner We should probably not pollute the role namespace with a name as common as "common". Moving the pre_task.yml to provisioners/openstack instead. * Add default values to provision-openstack.yml * Fix privileges in the pre-install playbook * Always let the openshift nodes access the DNS When `node_ingress_cidr` to limit the IP range for the DNS server, this can prevent the actual openshift nodes from accessing it as well. This commit makes the access from the `openstack_subnet_prefix` always pass through and uses `node_ingress_cidr` for additional access control. * Add a flat sec group for openstack provider Add a openstack_flat_secgroup, defaults to False. When set, merges sec rules for master, node, etcd, infra nodes into a single group. Less secure, but might help to mitigate quota limitations. Update docs. Use timeout 30s to mitigate the error: Timeout (12s) waiting for privilege escalation prompt. Signed-off-by: Bogdan Dobrelya * Add ansible.cfg for openstack provider Signed-off-by: Bogdan Dobrelya * Drop atomic-openshift-utils, update docs for origin TODO use with when: ansible_distribution == 'CentOS' Also update docs for origin Signed-off-by: Bogdan Dobrelya * Gather facts for provision playbook Provision tasks use facts like ansible_hostname and few others. W/o gathering facts, those expire, and the provision playbook cannot be reapplied in order to update the existing heat stack. Refresh the facts cache by specifying gather_facts: true. Signed-off-by: Bogdan Dobrelya * Update sample inventory with the latest changes * Fix yamllint errors * Remove the extraneous DNS directory It's a CASL-specific helper, not necessary for the provisioning playbooks. 
* Fix flake8 errors with the openstack inventory --- .../provisioning/openstack/INVENTORY-LICENSE.txt | 674 ++++++++++++++++++ playbooks/provisioning/openstack/README.md | 132 ++++ .../openstack/openstack_dns_records.yml | 75 ++ .../provisioning/openstack/openstack_dns_views.yml | 25 + .../openstack/post-provision-openstack.yml | 58 ++ playbooks/provisioning/openstack/pre-install.yml | 14 + playbooks/provisioning/openstack/pre_tasks.yml | 39 ++ .../provisioning/openstack/provision-openstack.yml | 49 ++ playbooks/provisioning/openstack/provision.yaml | 4 + playbooks/provisioning/openstack/roles | 1 + .../openstack/sample-inventory/ansible.cfg | 19 + .../openstack/sample-inventory/clouds.yaml | 5 + .../sample-inventory/group_vars/OSEv3.yml | 10 + .../openstack/sample-inventory/group_vars/all.yml | 57 ++ .../provisioning/openstack/sample-inventory/hosts | 44 ++ .../openstack/sample-inventory/openstack.py | 252 +++++++ roles/dns-server-detect/defaults/main.yml | 3 + roles/dns-server-detect/tasks/main.yml | 36 + roles/hostnames/tasks/main.yaml | 26 + roles/hostnames/test/inv | 12 + roles/hostnames/test/roles | 1 + roles/hostnames/test/test.retry | 3 + roles/hostnames/test/test.yaml | 4 + roles/hostnames/vars/main.yaml | 2 + roles/hostnames/vars/records.yaml | 28 + roles/openshift-prep/tasks/main.yml | 4 + roles/openshift-prep/tasks/prerequisites.yml | 35 + roles/openstack-stack/README.md | 9 + roles/openstack-stack/defaults/main.yml | 12 + roles/openstack-stack/tasks/main.yml | 41 ++ roles/openstack-stack/templates/heat_stack.yaml.j2 | 753 +++++++++++++++++++++ .../templates/heat_stack_server.yaml.j2 | 170 +++++ roles/openstack-stack/templates/user_data.j2 | 13 + roles/openstack-stack/test/roles | 1 + roles/openstack-stack/test/stack-create-test.yml | 16 + roles/subscription-manager/README.md | 156 +++++ roles/subscription-manager/pre_tasks/pre_tasks.yml | 45 ++ roles/subscription-manager/tasks/main.yml | 122 ++++ 38 files changed, 2950 insertions(+) create mode 100644 playbooks/provisioning/openstack/INVENTORY-LICENSE.txt create mode 100644 playbooks/provisioning/openstack/README.md create mode 100644 playbooks/provisioning/openstack/openstack_dns_records.yml create mode 100644 playbooks/provisioning/openstack/openstack_dns_views.yml create mode 100644 playbooks/provisioning/openstack/post-provision-openstack.yml create mode 100644 playbooks/provisioning/openstack/pre-install.yml create mode 100644 playbooks/provisioning/openstack/pre_tasks.yml create mode 100644 playbooks/provisioning/openstack/provision-openstack.yml create mode 100644 playbooks/provisioning/openstack/provision.yaml create mode 120000 playbooks/provisioning/openstack/roles create mode 100644 playbooks/provisioning/openstack/sample-inventory/ansible.cfg create mode 100644 playbooks/provisioning/openstack/sample-inventory/clouds.yaml create mode 100644 playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml create mode 100644 playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml create mode 100644 playbooks/provisioning/openstack/sample-inventory/hosts create mode 100755 playbooks/provisioning/openstack/sample-inventory/openstack.py create mode 100644 roles/dns-server-detect/defaults/main.yml create mode 100644 roles/dns-server-detect/tasks/main.yml create mode 100644 roles/hostnames/tasks/main.yaml create mode 100644 roles/hostnames/test/inv create mode 120000 roles/hostnames/test/roles create mode 100644 roles/hostnames/test/test.retry create mode 100644 roles/hostnames/test/test.yaml create 
mode 100644 roles/hostnames/vars/main.yaml create mode 100644 roles/hostnames/vars/records.yaml create mode 100644 roles/openshift-prep/tasks/main.yml create mode 100644 roles/openshift-prep/tasks/prerequisites.yml create mode 100644 roles/openstack-stack/README.md create mode 100644 roles/openstack-stack/defaults/main.yml create mode 100644 roles/openstack-stack/tasks/main.yml create mode 100644 roles/openstack-stack/templates/heat_stack.yaml.j2 create mode 100644 roles/openstack-stack/templates/heat_stack_server.yaml.j2 create mode 100644 roles/openstack-stack/templates/user_data.j2 create mode 120000 roles/openstack-stack/test/roles create mode 100644 roles/openstack-stack/test/stack-create-test.yml create mode 100644 roles/subscription-manager/README.md create mode 100644 roles/subscription-manager/pre_tasks/pre_tasks.yml create mode 100644 roles/subscription-manager/tasks/main.yml diff --git a/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt b/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md new file mode 100644 index 000000000..423d57113 --- /dev/null +++ b/playbooks/provisioning/openstack/README.md @@ -0,0 +1,132 @@ +# OpenStack Provisioning + +This repository contains playbooks and Heat templates to provision +OpenStack resources (servers, networking, volumes, security groups, +etc.). The result is an environment ready for openshift-ansible. 
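+
+The playbooks and the dynamic inventory talk to OpenStack through shade/os-client-config, so your credentials can come either from `OS_*` environment variables (for example a sourced `keystonerc`, as shown in the Deployment section below) or from a `clouds.yaml` file. If you go the `clouds.yaml` route, a minimal credentials section might look roughly like the sketch below -- all values are placeholders, and note that the sample `clouds.yaml` shipped in `sample-inventory` only carries the `ansible:` inventory settings:
+
+    # Hypothetical clouds.yaml sketch (Keystone v3) -- adjust to your cloud
+    clouds:
+      mycloud:
+        auth:
+          auth_url: "https://keystone.example.com:5000/v3"
+          username: "myuser"
+          password: "secret"
+          project_name: "myproject"
+          user_domain_name: "Default"
+          project_domain_name: "Default"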
+
+
+## Dependencies
+
+* [Ansible 2.3](https://pypi.python.org/pypi/ansible)
+* [shade](https://pypi.python.org/pypi/shade)
+* python-dns
+
+
+## What does it do
+
+* Create Nova servers with floating IP addresses attached
+* Assign Cinder volumes to the servers
+* Set up an `openshift` user with sudo privileges
+* Optionally attach Red Hat subscriptions
+* Set up a bind-based DNS server
+* When deploying more than one master, set up an HAProxy server
+
+
+## Set up
+
+### Copy the sample inventory
+
+    cp -r openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory inventory
+
+### Copy clouds.yaml
+
+    cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/clouds.yaml clouds.yaml
+
+### Copy ansible config
+
+    cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/ansible.cfg ansible.cfg
+
+### Update `inventory/group_vars/all.yml`
+
+Pay special attention to the values in the first paragraph -- these will depend on your OpenStack environment.
+
+The `env_id` and `openstack_dns_domain` will form the DNS domain all your servers will be under. With the default values, this will be `openshift.example.com`.
+
+`openstack_nameservers` is a list of DNS servers accessible from all the created Nova servers. These will serve as your DNS forwarders.
+
+`openstack_ssh_key` is a Nova keypair -- you can see your keypairs with `openstack keypair list`.
+
+`openstack_default_image_name` is the name of the Glance image the servers will use. You can see your images with `openstack image list`.
+
+`openstack_default_flavor` is the Nova flavor the servers will use. You can see your flavors with `openstack flavor list`.
+
+`openstack_external_network_name` is the name of the Neutron network providing external connectivity. It is often called `public`, `external` or `ext-net`. You can see your networks with `openstack network list`.
+
+The `openstack_num_masters`, `openstack_num_infra` and `openstack_num_nodes` values specify the number of Master, Infra and App nodes to create.
+
+`openstack_flat_secgrp` controls Neutron security group creation for the Heat stacks. Set it to true if you run into security group rule quota issues: it trades some security for a smaller number of rules by sharing the same set of firewall rules across the master, node, etcd and infra nodes.
+
+### Update the DNS names in `inventory/hosts`
+
+The different server groups are currently grouped by domain name, so if you end up using a different domain than `openshift.example.com`, you will need to update the `inventory/hosts` file.
+
+For example, if your final domain is `my.cloud.com`, you can run this command to update the `hosts` file:
+
+    sed -i 's/openshift.example.com/my.cloud.com/' inventory/hosts
+
+### Configure the OpenShift parameters
+
+Finally, you need to update the DNS entry in `inventory/group_vars/OSEv3.yml` (look at `openshift_master_default_subdomain`).
+
+In addition, this is the place where you can customise your OpenShift installation, for example by specifying the authentication method.
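+
+A minimal htpasswd-based identity provider entry in `inventory/group_vars/OSEv3.yml` might look like the sketch below. Treat it as a sketch only -- check the exact variable names and values against the sample inventory linked below for your openshift-ansible version:
+
+    # Hypothetical OSEv3.yml snippet -- verify against your openshift-ansible release
+    openshift_master_identity_providers: [{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]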
+
+The full list of options is available in this sample inventory:
+
+https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example
+
+Note that in order to deploy OpenShift Origin, you should update the following variables in `inventory/group_vars/OSEv3.yml` and `all.yml`:
+
+    deployment_type: origin
+    origin_release: 1.5.1
+    openshift_deployment_type: "{{ deployment_type }}"
+
+## Deployment
+
+### Run the playbook
+
+Assuming your OpenStack (Keystone) credentials are in the `keystonerc` file, this is how you start the provisioning process:
+
+    . keystonerc
+    ansible-playbook -i inventory --timeout 30 --private-key ~/.ssh/openshift openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
+
+### Install OpenShift
+
+Once it succeeds, you can install OpenShift by running:
+
+    ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/openshift-node/network_manager.yml
+    ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml
+
+Note that `network_manager.yml` is only required if you're deploying OpenShift Origin.
+
+## License
+
+Like the rest of the openshift-ansible-contrib repository, the code here is licensed under Apache 2. However, the openstack.py file under `sample-inventory` is GPLv3+. See the INVENTORY-LICENSE.txt file for the full text of the license.
diff --git a/playbooks/provisioning/openstack/openstack_dns_records.yml b/playbooks/provisioning/openstack/openstack_dns_records.yml new file mode 100644 index 000000000..b32b70ba9 --- /dev/null +++ b/playbooks/provisioning/openstack/openstack_dns_records.yml @@ -0,0 +1,75 @@ +--- +- name: "Generate list of private A records" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['openstack']['private_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +- name: "Set the private DNS server to use the external value (if provided)" + set_fact: + nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" + nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" + nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['private'] is defined + +- name: "Set the private DNS server to use the provisioned value" + set_fact: + nsupdate_server_private: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" + nsupdate_key_secret_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_secret }}" + nsupdate_key_algorithm_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_algorithm }}" + when: + - nsupdate_server_private is undefined + +- name: "Generate the private Add section for DNS" + set_fact: + private_named_records: + - view: "private" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_private }}" + key_name: "{{ ( 'private-' + full_dns_domain ) }}" + key_secret: "{{ nsupdate_key_secret_private }}" + key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" + entries: "{{ private_records }}" + +- name: "Generate list of public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['openstack']['public_v4'] } ]
}}" + with_items: "{{ groups['cluster_hosts'] }}" + +- name: "Add wildcard records to the public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['openstack']['public_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + +- name: "Set the public DNS server details to use the external value (if provided)" + set_fact: + nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" + nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" + nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['public'] is defined + +- name: "Set the public DNS server details to use the provisioned value" + set_fact: + nsupdate_server_public: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" + nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" + nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" + when: + - nsupdate_server_public is undefined + +- name: "Generate the public Add section for DNS" + set_fact: + public_named_records: + - view: "public" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_public }}" + key_name: "{{ ( 'public-' + full_dns_domain ) }}" + key_secret: "{{ nsupdate_key_secret_public }}" + key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" + entries: "{{ public_records }}" + +- name: "Generate the final dns_records_add" + set_fact: + dns_records_add: "{{ private_named_records + public_named_records }}" diff --git a/playbooks/provisioning/openstack/openstack_dns_views.yml b/playbooks/provisioning/openstack/openstack_dns_views.yml new file mode 100644 index 000000000..ea0a7cb96 --- /dev/null +++ b/playbooks/provisioning/openstack/openstack_dns_views.yml @@ -0,0 +1,25 @@ +--- +- name: "Generate ACL list for DNS server" + set_fact: + acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['openstack']['private_v4'] + '/32') ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +- name: "Generate the private view" + set_fact: + private_named_view: + - name: "private" + acl_entry: "{{ acl_list }}" + zone: + - dns_domain: "{{ full_dns_domain }}" + +- name: "Generate the public view" + set_fact: + public_named_view: + - name: "public" + zone: + - dns_domain: "{{ full_dns_domain }}" + forwarder: "{{ public_dns_nameservers }}" + +- name: "Generate the final named_config_views" + set_fact: + named_config_views: "{{ private_named_view + public_named_view }}" diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml new file mode 100644 index 000000000..4e42c1c7f --- /dev/null +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -0,0 +1,58 @@ +--- +# Assign hostnames +- hosts: cluster_hosts + become: true + pre_tasks: + - include: pre_tasks.yml + roles: + - role: hostnames + +# Subscribe DNS Host to allow for configuration below +- hosts: dns + become: true + roles: + - role: subscription-manager + when: hostvars.localhost.rhsm_register + tags: 'subscription-manager' + +# Determine which DNS server(s) to use for our generated records +- hosts: localhost + roles: + - dns-server-detect + +# Build the DNS Server Views and Configure DNS Server(s) +- hosts: dns + become: true + pre_tasks: + - 
include: pre_tasks.yml + - name: "Generate dns-server views" + include: openstack_dns_views.yml + roles: + - role: dns-server + +# Build and process DNS Records +- hosts: localhost + pre_tasks: + - include: pre_tasks.yml + - name: "Generate dns records" + include: openstack_dns_records.yml + roles: + - role: dns + +# OpenShift Pre-Requisites +- hosts: OSEv3 + become: true + tasks: + - name: "Edit /etc/resolv.conf on masters/nodes" + lineinfile: + state: present + dest: /etc/resolv.conf + regexp: "nameserver {{ hostvars['localhost'].private_dns_server }}" + line: "nameserver {{ hostvars['localhost'].private_dns_server }}" + insertafter: search* + - name: "Include DNS configuration to ensure proper name resolution" + lineinfile: + state: present + dest: /etc/sysconfig/network + regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" + line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" diff --git a/playbooks/provisioning/openstack/pre-install.yml b/playbooks/provisioning/openstack/pre-install.yml new file mode 100644 index 000000000..629182d49 --- /dev/null +++ b/playbooks/provisioning/openstack/pre-install.yml @@ -0,0 +1,14 @@ +--- +############################### +# OpenShift Pre-Requisites + +# - subscribe hosts +# - prepare docker +# - other prep (install additional packages, etc.) +# +- hosts: OSEv3 + become: true + roles: + - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager', ansible_sudo: true } + - { role: docker, tags: 'docker' } + - { role: openshift-prep, tags: 'openshift-prep' } diff --git a/playbooks/provisioning/openstack/pre_tasks.yml b/playbooks/provisioning/openstack/pre_tasks.yml new file mode 100644 index 000000000..a4ff7c4ac --- /dev/null +++ b/playbooks/provisioning/openstack/pre_tasks.yml @@ -0,0 +1,39 @@ +--- +- name: Generate Environment ID + set_fact: + env_random_id: "{{ ansible_date_time.epoch }}" + run_once: true + delegate_to: localhost + +- name: Set default Environment ID + set_fact: + default_env_id: "casl-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" + delegate_to: localhost + +- name: Setting Common Facts + set_fact: + env_id: "{{ env_id | default(default_env_id) }}" + delegate_to: localhost + +- name: Set Dynamic Inventory Filters + become: false + shell: > + export OS_INV_FILTER_KEY=clusterid && export OS_INV_FILTER_VALUE={{ env_id }} + delegate_to: localhost + +- name: Updating DNS domain to include env_id (if not empty) + set_fact: + full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' 
+ public_dns_domain) }}" + delegate_to: localhost + +- name: Set the APP domain for OpenShift use + set_fact: + openshift_app_domain: "{{ openshift_app_domain | default('apps') }}" + delegate_to: localhost + +- name: Set the default app domain for routing purposes + set_fact: + openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" + delegate_to: localhost + when: + - openshift_master_default_subdomain is undefined diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml new file mode 100644 index 000000000..c7ad782c9 --- /dev/null +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -0,0 +1,49 @@ +--- +- hosts: localhost + gather_facts: True + pre_tasks: + - include: pre_tasks.yml + roles: + - role: openstack-stack + stack_name: "{{ env_id }}.{{ public_dns_domain }}" + dns_domain: "{{ public_dns_domain }}" + dns_nameservers: "{{ public_dns_nameservers }}" + subnet_prefix: "{{ openstack_subnet_prefix }}" + ssh_public_key: "{{ openstack_ssh_public_key }}" + openstack_image: "{{ openstack_default_image_name }}" + lb_flavor: "{{ openstack_default_flavor | default('m1.small') }}" + etcd_flavor: "{{ openstack_default_flavor | default('m1.small') }}" + master_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" + node_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" + infra_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" + dns_flavor: "{{ openstack_default_flavor | default('m1.small') }}" + external_network: "{{ openstack_external_network_name }}" + num_etcd: "{{ openstack_num_etcd | default(0) }}" + num_masters: "{{ openstack_num_masters }}" + num_nodes: "{{ openstack_num_nodes }}" + num_infra: "{{ openstack_num_infra }}" + num_dns: "{{ openstack_num_dns | default(1) }}" + master_volume_size: "{{ docker_volume_size }}" + app_volume_size: "{{ docker_volume_size }}" + infra_volume_size: "{{ docker_volume_size }}" + + +- name: Refresh Server inventory + hosts: localhost + connection: local + gather_facts: False + tasks: + - meta: refresh_inventory + +- hosts: cluster_hosts + gather_facts: false + tasks: + - name: Debug hostvar + debug: + msg: "{{ hostvars[inventory_hostname] }}" + verbosity: 2 + - name: waiting for server to come back + local_action: wait_for host={{ hostvars[inventory_hostname]['ansible_ssh_host'] }} port=22 delay=30 timeout=300 + become: false + +- include: post-provision-openstack.yml diff --git a/playbooks/provisioning/openstack/provision.yaml b/playbooks/provisioning/openstack/provision.yaml new file mode 100644 index 000000000..7cde5e8b8 --- /dev/null +++ b/playbooks/provisioning/openstack/provision.yaml @@ -0,0 +1,4 @@ +--- +- include: "provision-openstack.yml" + +- include: "pre-install.yml" diff --git a/playbooks/provisioning/openstack/roles b/playbooks/provisioning/openstack/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/provisioning/openstack/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg new file mode 100644 index 000000000..a701e59ac --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg @@ -0,0 +1,19 @@ +# config file for ansible -- http://ansible.com/ +# ============================================== +[defaults] +forks = 50 +# work around privilege escalation timeouts in ansible +timeout = 30 
+host_key_checking = false +inventory = inventory +inventory_ignore_extensions = secrets.py, .pyc +gathering = smart +retry_files_enabled = false +fact_caching = jsonfile +fact_caching_connection = .ansible/cached_facts +fact_caching_timeout = 900 + +[ssh_connection] +ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no +control_path = /var/tmp/%%h-%%r +pipelining = True diff --git a/playbooks/provisioning/openstack/sample-inventory/clouds.yaml b/playbooks/provisioning/openstack/sample-inventory/clouds.yaml new file mode 100644 index 000000000..8182d2995 --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/clouds.yaml @@ -0,0 +1,5 @@ +--- +ansible: + use_hostnames: True + expand_hostvars: True + fail_on_errors: True diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml new file mode 100644 index 000000000..32ec43387 --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -0,0 +1,10 @@ +--- +openshift_deployment_type: openshift-enterprise +openshift_release: v3.5 +openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" + +# NOTE(shadower): do not remove this line, otherwise the default node labels +# won't be set up. +openshift_node_labels: "{{ openstack.metadata.node_labels }}" + +osm_default_node_selector: 'region=primary' diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml new file mode 100644 index 000000000..047923253 --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -0,0 +1,57 @@ +--- +env_id: "openshift" +public_dns_domain: "example.com" +public_dns_nameservers: [] + +openstack_ssh_public_key: "openshift" +openstack_default_image_name: "rhel73" +openstack_default_flavor: "m1.medium" +openstack_external_network_name: "public" + +openstack_num_masters: 1 +openstack_num_infra: 1 +openstack_num_nodes: 2 + +docker_volume_size: "15" + +openstack_subnet_prefix: "192.168.99" + +# # Red Hat subscription +# # Using Red Hat Satellite: +# rhsm_register: True +# rhsm_satellite: 'sat-6.example.com' +# rhsm_org: 'OPENSHIFT_ORG' +# rhsm_activationkey: '' + +# # Or using RHN username, password and optionally pool: +# rhsm_register: True +# rhsm_username: '' +# rhsm_password: '' +# rhsm_pool: '' + +# rhsm_repos: +# - "rhel-7-server-rpms" +# - "rhel-7-server-ose-3.5-rpms" +# - "rhel-7-server-extras-rpms" +# - "rhel-7-fast-datapath-rpms" + + +# # Roll-your-own DNS +# openstack_num_dns: 0 +# external_nsupdate_keys: +# public: +# key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg==' +# key_algorithm: 'hmac-md5' +# server: '192.168.1.1' +# private: +# key_secret: 'kVE2bVTgZjrdJipxPhID8BEZmbHD8cExlVPR+zbFpW6la8kL5wpXiwOh8q5AAosXQI5t95UXwq3Inx8QT58duw==' +# key_algorithm: 'hmac-md5' +# server: '192.168.1.2' + + +# NOTE(shadower): Do not change this value. The Ansible user is currently +# hardcoded to `openshift`. 
+ansible_user: openshift + +# Use a single security group for a cluster +openstack_flat_secgrp: false diff --git a/playbooks/provisioning/openstack/sample-inventory/hosts b/playbooks/provisioning/openstack/sample-inventory/hosts new file mode 100644 index 000000000..5f73b60f6 --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/hosts @@ -0,0 +1,44 @@ +#[all:vars] +# For all group_vars, see ./group_vars/all.yml + +# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups. +# The lb group lets Ansible configure HAProxy as the load balancing solution. +# Comment lb out if your load balancer is pre-configured. +[cluster_hosts:children] +OSEv3 +dns + +[OSEv3:children] +masters +nodes +etcd + +# Set variables common for all OSEv3 hosts +#[OSEv3:vars] + +# For OSEv3 normal group vars, see ./group_vars/OSEv3.yml + +# Host Groups + +[masters:children] +masters.openshift.example.com + +[etcd:children] +etcd.openshift.example.com + +[nodes:children] +masters +infra.openshift.example.com +nodes.openshift.example.com + +[infra_hosts:children] +infra.openshift.example.com + +[dns:children] +dns.openshift.example.com + +[masters.openshift.example.com] +[etcd.openshift.example.com] +[infra.openshift.example.com] +[nodes.openshift.example.com] +[dns.openshift.example.com] diff --git a/playbooks/provisioning/openstack/sample-inventory/openstack.py b/playbooks/provisioning/openstack/sample-inventory/openstack.py new file mode 100755 index 000000000..8de73e1e0 --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/openstack.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python + +# Copyright (c) 2012, Marco Vito Moscaritolo +# Copyright (c) 2013, Jesse Keating +# Copyright (c) 2015, Hewlett-Packard Development Company, L.P. +# Copyright (c) 2016, Rackspace Australia +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +# The OpenStack Inventory module uses os-client-config for configuration. +# https://github.com/stackforge/os-client-config +# This means it will either: +# - Respect normal OS_* environment variables like other OpenStack tools +# - Read values from a clouds.yaml file. +# If you want to configure via clouds.yaml, you can put the file in: +# - Current directory +# - ~/.config/openstack/clouds.yaml +# - /etc/openstack/clouds.yaml +# - /etc/ansible/openstack.yml +# The clouds.yaml file can contain entries for multiple clouds and multiple +# regions of those clouds. If it does, this inventory module will connect to +# all of them and present them as one contiguous inventory. +# +# See the adjacent openstack.yml file for an example config file +# There are two ansible inventory specific options that can be set in +# the inventory section. 
+# expand_hostvars controls whether or not the inventory will make extra API +# calls to fill out additional information about each server +# use_hostnames changes the behavior from registering every host with its UUID +# and making a group of its hostname to only doing this if the +# hostname in question has more than one server +# fail_on_errors causes the inventory to fail and return no hosts if one cloud +# has failed (for example, bad credentials or being offline). +# When set to False, the inventory will return hosts from +# whichever other clouds it can contact. (Default: True) + +import argparse +import collections +import os +import sys +import time +from distutils.version import StrictVersion + +try: + import json +except ImportError: + import simplejson as json + +import os_client_config +import shade +import shade.inventory + +CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml'] + + +def get_groups_from_server(server_vars, namegroup=True): + groups = [] + + region = server_vars['region'] + cloud = server_vars['cloud'] + metadata = server_vars.get('metadata', {}) + + # Create a group for the cloud + groups.append(cloud) + + # Create a group on region + groups.append(region) + + # And one by cloud_region + groups.append("%s_%s" % (cloud, region)) + + # Check if group metadata key in servers' metadata + if 'group' in metadata: + groups.append(metadata['group']) + + for extra_group in metadata.get('groups', '').split(','): + if extra_group: + groups.append(extra_group.strip()) + + groups.append('instance-%s' % server_vars['id']) + if namegroup: + groups.append(server_vars['name']) + + for key in ('flavor', 'image'): + if 'name' in server_vars[key]: + groups.append('%s-%s' % (key, server_vars[key]['name'])) + + for key, value in iter(metadata.items()): + groups.append('meta-%s_%s' % (key, value)) + + az = server_vars.get('az', None) + if az: + # Make groups for az, region_az and cloud_region_az + groups.append(az) + groups.append('%s_%s' % (region, az)) + groups.append('%s_%s_%s' % (cloud, region, az)) + return groups + + +def get_host_groups(inventory, refresh=False): + (cache_file, cache_expiration_time) = get_cache_settings() + if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): + groups = to_json(get_host_groups_from_cloud(inventory)) + open(cache_file, 'w').write(groups) + else: + groups = open(cache_file, 'r').read() + return groups + + +def append_hostvars(hostvars, groups, key, server, namegroup=False): + hostvars[key] = dict( + ansible_ssh_host=server['interface_ip'], + openshift_hostname=server['name'], + openshift_public_hostname=server['name'], + openstack=server) + for group in get_groups_from_server(server, namegroup=namegroup): + groups[group].append(key) + + +def get_host_groups_from_cloud(inventory): + groups = collections.defaultdict(list) + firstpass = collections.defaultdict(list) + hostvars = {} + list_args = {} + if hasattr(inventory, 'extra_config'): + use_hostnames = inventory.extra_config['use_hostnames'] + list_args['expand'] = inventory.extra_config['expand_hostvars'] + if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"): + list_args['fail_on_cloud_config'] = \ + inventory.extra_config['fail_on_errors'] + else: + use_hostnames = False + + for server in inventory.list_hosts(**list_args): + + if 'interface_ip' not in server: + continue + try: + if server["metadata"][os.environ['OS_INV_FILTER_KEY']] == os.environ['OS_INV_FILTER_VALUE']: + firstpass[server['name']].append(server) + except Exception: + 
firstpass[server['name']].append(server) + for name, servers in firstpass.items(): + if len(servers) == 1 and use_hostnames: + append_hostvars(hostvars, groups, name, servers[0]) + else: + server_ids = set() + # Trap for duplicate results + for server in servers: + server_ids.add(server['id']) + if len(server_ids) == 1 and use_hostnames: + append_hostvars(hostvars, groups, name, servers[0]) + else: + for server in servers: + append_hostvars( + hostvars, groups, server['id'], server, + namegroup=True) + groups['_meta'] = {'hostvars': hostvars} + return groups + + +def is_cache_stale(cache_file, cache_expiration_time, refresh=False): + ''' Determines if cache file has expired, or if it is still valid ''' + if refresh: + return True + if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0: + mod_time = os.path.getmtime(cache_file) + current_time = time.time() + if (mod_time + cache_expiration_time) > current_time: + return False + return True + + +def get_cache_settings(): + config = os_client_config.config.OpenStackConfig( + config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES) + # For inventory-wide caching + cache_expiration_time = config.get_cache_expiration_time() + cache_path = config.get_cache_path() + if not os.path.exists(cache_path): + os.makedirs(cache_path) + cache_file = os.path.join(cache_path, 'ansible-inventory.cache') + return (cache_file, cache_expiration_time) + + +def to_json(in_dict): + return json.dumps(in_dict, sort_keys=True, indent=2) + + +def parse_args(): + parser = argparse.ArgumentParser(description='OpenStack Inventory Module') + parser.add_argument('--private', + action='store_true', + help='Use private address for ansible host') + parser.add_argument('--refresh', action='store_true', + help='Refresh cached information') + parser.add_argument('--debug', action='store_true', default=False, + help='Enable debug output') + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--list', action='store_true', + help='List active servers') + group.add_argument('--host', help='List details about the specific host') + + return parser.parse_args() + + +def main(): + args = parse_args() + try: + config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES + shade.simple_logging(debug=args.debug) + inventory_args = dict( + refresh=args.refresh, + config_files=config_files, + private=args.private, + ) + if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): + inventory_args.update(dict( + config_key='ansible', + config_defaults={ + 'use_hostnames': False, + 'expand_hostvars': True, + 'fail_on_errors': True, + } + )) + + inventory = shade.inventory.OpenStackInventory(**inventory_args) + + if args.list: + output = get_host_groups(inventory, refresh=args.refresh) + elif args.host: + output = to_json(inventory.get_host(args.host)) + print(output) + except shade.OpenStackCloudException as e: + sys.stderr.write('%s\n' % str(e)) + sys.exit(1) + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/roles/dns-server-detect/defaults/main.yml b/roles/dns-server-detect/defaults/main.yml new file mode 100644 index 000000000..58bd861cd --- /dev/null +++ b/roles/dns-server-detect/defaults/main.yml @@ -0,0 +1,3 @@ +--- + +external_nsupdate_keys: {} diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml new file mode 100644 index 000000000..183c0a0ca --- /dev/null +++ b/roles/dns-server-detect/tasks/main.yml @@ -0,0 +1,36 @@ +--- +- fail: + msg: 'Missing required private DNS 
server(s)' + when: + - external_nsupdate_keys['private'] is undefined + - hostvars[groups['dns'][0]] is undefined + +- fail: + msg: 'Missing required public DNS server(s)' + when: + - external_nsupdate_keys['public'] is undefined + - hostvars[groups['dns'][0]] is undefined + +- name: "Set the private DNS server to use the external value (if provided)" + set_fact: + private_dns_server: "{{ external_nsupdate_keys['private']['server'] }}" + when: + - external_nsupdate_keys['private'] is defined + +- name: "Set the private DNS server to use the provisioned value" + set_fact: + private_dns_server: "{{ hostvars[groups['dns'][0]].openstack.private_v4 }}" + when: + - private_dns_server is undefined + +- name: "Set the public DNS server to use the external value (if provided)" + set_fact: + public_dns_server: "{{ external_nsupdate_keys['public']['server'] }}" + when: + - external_nsupdate_keys['public'] is defined + +- name: "Set the public DNS server to use the provisioned value" + set_fact: + public_dns_server: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" + when: + - public_dns_server is undefined diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml new file mode 100644 index 000000000..c49852210 --- /dev/null +++ b/roles/hostnames/tasks/main.yaml @@ -0,0 +1,26 @@ +--- +- name: Setting Hostname Fact + set_fact: + new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" + +- name: Setting FQDN Fact + set_fact: + new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" + +- name: Setting hostname and DNS domain + hostname: name="{{ new_fqdn }}" + +- name: Check for cloud.cfg + stat: path=/etc/cloud/cloud.cfg + register: cloud_cfg + +- name: Prevent cloud-init updates of hostname/fqdn (if applicable) + lineinfile: + dest: /etc/cloud/cloud.cfg + state: present + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ - set_hostname', line: '# - set_hostname' } + - { regexp: '^ - update_hostname', line: '# - update_hostname' } + when: cloud_cfg.stat.exists == True diff --git a/roles/hostnames/test/inv b/roles/hostnames/test/inv new file mode 100644 index 000000000..ffbe6e03d --- /dev/null +++ b/roles/hostnames/test/inv @@ -0,0 +1,12 @@ +[all:vars] +dns_domain=example.com + +[openshift_masters] +192.168.124.41 dns_private_ip=1.1.1.41 dns_public_ip=192.168.124.41 +192.168.124.117 dns_private_ip=1.1.1.117 dns_public_ip=192.168.124.117 + +[openshift_nodes] +192.168.124.40 dns_private_ip=1.1.1.40 dns_public_ip=192.168.124.40 + +#[dns] +#192.168.124.117 dns_private_ip=1.1.1.117 diff --git a/roles/hostnames/test/roles b/roles/hostnames/test/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/roles/hostnames/test/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/roles/hostnames/test/test.retry b/roles/hostnames/test/test.retry new file mode 100644 index 000000000..63fc08e4c --- /dev/null +++ b/roles/hostnames/test/test.retry @@ -0,0 +1,3 @@ +192.168.124.117 +192.168.124.40 +192.168.124.41 diff --git a/roles/hostnames/test/test.yaml b/roles/hostnames/test/test.yaml new file mode 100644 index 000000000..0c56aea51 --- /dev/null +++ b/roles/hostnames/test/test.yaml @@ -0,0 +1,4 @@ +--- +- hosts: all + roles: + - role: hostnames diff --git a/roles/hostnames/vars/main.yaml b/roles/hostnames/vars/main.yaml new file mode 100644 index 000000000..3eecb8dc4 --- /dev/null +++ b/roles/hostnames/vars/main.yaml @@ -0,0 +1,2 @@ +--- +counter: 1 diff --git a/roles/hostnames/vars/records.yaml 
b/roles/hostnames/vars/records.yaml new file mode 100644 index 000000000..0cadc8181 --- /dev/null +++ b/roles/hostnames/vars/records.yaml @@ -0,0 +1,28 @@ +--- +- name: "Building Records" + set_fact: + dns_records_add: + - view: private + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 172.16.15.94 + - type: A + hostname: node1.example.com + ip: 172.16.15.86 + - type: A + hostname: node2.example.com + ip: 172.16.15.87 + - view: public + zone: example.com + entries: + - type: A + hostname: master1.example.com + ip: 10.3.10.116 + - type: A + hostname: node1.example.com + ip: 10.3.11.46 + - type: A + hostname: node2.example.com + ip: 10.3.12.6 diff --git a/roles/openshift-prep/tasks/main.yml b/roles/openshift-prep/tasks/main.yml new file mode 100644 index 000000000..5e484e75f --- /dev/null +++ b/roles/openshift-prep/tasks/main.yml @@ -0,0 +1,4 @@ +--- +# Starting Point for OpenShift Installation and Configuration +- include: prerequisites.yml + tags: [prerequisites] diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml new file mode 100644 index 000000000..60507636f --- /dev/null +++ b/roles/openshift-prep/tasks/prerequisites.yml @@ -0,0 +1,35 @@ +--- +- name: "Cleaning yum repositories" + command: "yum clean all" + +- name: "Install required packages" + yum: + name: "{{ item }}" + state: latest + with_items: + - wget + - git + - net-tools + - bind-utils + - bridge-utils + - bash-completion + - vim-enhanced + +- name: "Update all packages (this can take a very long time)" + yum: + name: "*" + state: latest + +- name: "Verify hostname" + shell: hostnamectl status | awk "/Static hostname/"'{ print $3 }' + register: hostname_fqdn + +- name: "Set hostname if required" + hostname: + name: "{{ ansible_fqdn }}" + when: hostname_fqdn.stdout != ansible_fqdn + +- name: "Verify SELinux is enforcing" + fail: + msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" + when: ansible_selinux.config_mode != "enforcing" diff --git a/roles/openstack-stack/README.md b/roles/openstack-stack/README.md new file mode 100644 index 000000000..509c9de6c --- /dev/null +++ b/roles/openstack-stack/README.md @@ -0,0 +1,9 @@ +# Role openstack-stack + +Role for spinning up instances using OpenStack Heat. 
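+
+A typical invocation is a small play that passes in the stack name, network settings and per-node-type flavors. The variable names below mirror those used by `test/stack-create-test.yml` further down in this patch; the values are placeholders, and this is not a complete list of everything the Heat templates reference (volume sizes, `openstack_flat_secgrp`, etc. are usually supplied via group_vars):
+
+```
+- hosts: localhost
+  roles:
+    - role: openstack-stack
+      stack_name: my-cluster
+      dns_domain: example.com
+      dns_nameservers: ['10.0.0.2']
+      subnet_prefix: '192.168.99'
+      ssh_public_key: my-keypair
+      openstack_image: rhel-7-server
+      external_network: public
+      etcd_flavor: m1.small
+      master_flavor: m1.medium
+      node_flavor: m1.medium
+      infra_flavor: m1.medium
+      dns_flavor: m1.small
+```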
+ +## To Test + +``` +ansible-playbook casl-ansible/roles/openstack-stack/test/stack-create-test.yml +``` diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml new file mode 100644 index 000000000..2a4ef3a45 --- /dev/null +++ b/roles/openstack-stack/defaults/main.yml @@ -0,0 +1,12 @@ +--- +dns_volume_size: 1 +ssh_ingress_cidr: 0.0.0.0/0 +node_ingress_cidr: 0.0.0.0/0 +master_ingress_cidr: 0.0.0.0/0 +lb_ingress_cidr: 0.0.0.0/0 +num_etcd: 0 +num_masters: 1 +num_nodes: 1 +num_dns: 1 +num_infra: 1 +etcd_volume_size: 2 diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml new file mode 100644 index 000000000..71c7bbe0d --- /dev/null +++ b/roles/openstack-stack/tasks/main.yml @@ -0,0 +1,41 @@ +--- +- name: create HOT stack template prefix + register: stack_template_pre + tempfile: + state: directory + prefix: casl-ansible + +- name: set template paths + set_fact: + stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" + server_template_path: "{{ stack_template_pre.path }}/server.yaml" + user_data_template_path: "{{ stack_template_pre.path }}/user-data" + +- name: generate HOT stack template from jinja2 template + template: + src: heat_stack.yaml.j2 + dest: "{{ stack_template_path }}" + +- name: generate HOT server template from jinja2 template + template: + src: heat_stack_server.yaml.j2 + dest: "{{ server_template_path }}" + +- name: generate user_data from jinja2 template + template: + src: user_data.j2 + dest: "{{ user_data_template_path }}" + +- name: create stack + ignore_errors: False + register: stack_create + os_stack: + name: "{{ stack_name }}" + state: present + template: "{{ stack_template_path }}" + wait: yes + +- name: cleanup temp files + file: + path: "{{ stack_template_pre.path }}" + state: absent diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 new file mode 100644 index 000000000..c750865a5 --- /dev/null +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -0,0 +1,753 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster + +parameters: + +outputs: + + etcd_names: + description: Name of the etcds + value: { get_attr: [ etcd, name ] } + + etcd_ips: + description: IPs of the etcds + value: { get_attr: [ etcd, private_ip ] } + + etcd_floating_ips: + description: Floating IPs of the etcds + value: { get_attr: [ etcd, floating_ip ] } + + master_names: + description: Name of the masters + value: { get_attr: [ masters, name ] } + + master_ips: + description: IPs of the masters + value: { get_attr: [ masters, private_ip ] } + + master_floating_ips: + description: Floating IPs of the masters + value: { get_attr: [ masters, floating_ip ] } + + node_names: + description: Name of the nodes + value: { get_attr: [ compute_nodes, name ] } + + node_ips: + description: IPs of the nodes + value: { get_attr: [ compute_nodes, private_ip ] } + + node_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ compute_nodes, floating_ip ] } + + infra_names: + description: Name of the nodes + value: { get_attr: [ infra_nodes, name ] } + + infra_ips: + description: IPs of the nodes + value: { get_attr: [ infra_nodes, private_ip ] } + + infra_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ infra_nodes, floating_ip ] } + + dns_name: + description: Name of the DNS + value: + get_attr: + - dns + - name + + dns_floating_ip: + description: Floating IP of the DNS + value: + get_attr: + - 
dns + - addresses + - str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + - 1 + - addr + +resources: + + net: + type: OS::Neutron::Net + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + + subnet: + type: OS::Neutron::Subnet + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-subnet + params: + cluster_id: {{ stack_name }} + network: { get_resource: net } + cidr: + str_replace: + template: subnet_24_prefix.0/24 + params: + subnet_24_prefix: {{ subnet_prefix }} + allocation_pools: + - start: + str_replace: + template: subnet_24_prefix.3 + params: + subnet_24_prefix: {{ subnet_prefix }} + end: + str_replace: + template: subnet_24_prefix.254 + params: + subnet_24_prefix: {{ subnet_prefix }} + dns_nameservers: + {% for nameserver in dns_nameservers %} + - {{ nameserver }} + {% endfor %} + + router: + type: OS::Neutron::Router + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-router + params: + cluster_id: {{ stack_name }} + external_gateway_info: + network: {{ external_network }} + + interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: subnet } + +# keypair: +# type: OS::Nova::KeyPair +# properties: +# name: +# str_replace: +# template: openshift-ansible-cluster_id-keypair +# params: +# cluster_id: {{ stack_name }} +# public_key: {{ ssh_public_key }} + +{% if openstack_flat_secgrp|bool %} + flat-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-flat-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8444 + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2380 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + 
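+        # 4789/udp is the VXLAN port used by the OpenShift SDN overlay between cluster hosts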
protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 +{% else %} + master-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-master-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster master + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8444 + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + + etcd-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-etcd-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id etcd cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + remote_mode: remote_group_id + + node-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-node-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + 
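+        # remote_mode: remote_group_id with no remote_group_id set makes Neutron default the remote group to this security group itself, i.e. only other cluster nodes may send this traffic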
remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + + infra-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-infra-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift infrastructure cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 +{% endif %} + + dns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-dns-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id cluster DNS + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" +{% if num_masters is greaterthan 1 %} + lb-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: openshift-ansible-{{ stack_name }}-lb-secgrp + description: Security group for {{ stack_name }} cluster Load Balancer + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr }} + {% if openshift_master_console_port is defined and openshift_master_console_port is not equalto openshift_master_api_port %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port | default(8443) }} + port_range_max: {{ openshift_master_console_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr }} + {% endif %} +{% endif %} + + etcd: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_etcd }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: etcd + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: etcds + cluster_id: {{ stack_name }} + type: etcd + image: {{ openstack_image }} + flavor: {{ etcd_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + floating_network: {{ external_network }} + net_name: + str_replace: + template: 
openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ etcd_volume_size }} + depends_on: + - interface + +{% if num_masters is greaterthan 1 %} + loadbalancer: + type: OS::Heat::ResourceGroup + properties: + count: 1 + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: lb + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: lb + cluster_id: {{ stack_name }} + type: lb + image: {{ openstack_image }} + flavor: {{ lb_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: lb-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: 5 + depends_on: + - interface +{% endif %} + + masters: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_masters }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: master + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: masters + cluster_id: {{ stack_name }} + type: master + image: {{ openstack_image }} + flavor: {{ master_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: +{% if openstack_flat_secgrp|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: master-secgrp } + - { get_resource: node-secgrp } +{% if num_etcd is equalto 0 %} + - { get_resource: etcd-secgrp } +{% endif %} +{% endif %} + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ master_volume_size }} + depends_on: + - interface + + compute_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_nodes }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: subtype-k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: node + subtype: app + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: nodes + cluster_id: {{ stack_name }} + type: node + subtype: app + node_labels: + region: primary + image: {{ openstack_image }} + flavor: {{ node_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: + - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ app_volume_size }} + depends_on: + - interface + + infra_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_infra }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: subtypek8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: node + subtype: infra + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + 
params: + k8s_type: infra + cluster_id: {{ stack_name }} + type: node + subtype: infra + node_labels: + region: infra + image: {{ openstack_image }} + flavor: {{ infra_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: +{% if openstack_flat_secgrp|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: node-secgrp } + - { get_resource: infra-secgrp } +{% endif %} + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ infra_volume_size }} + depends_on: + - interface + + dns: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_dns }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: dns + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: dns + cluster_id: {{ stack_name }} + type: dns + image: {{ openstack_image }} + flavor: {{ dns_flavor }} + key_name: {{ ssh_public_key }} + net: { get_resource: net } + subnet: { get_resource: subnet } + secgrp: +{% if openstack_flat_secgrp|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: node-secgrp } +{% endif %} + - { get_resource: dns-secgrp } + floating_network: {{ external_network }} + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + volume_size: {{ dns_volume_size }} + depends_on: + - interface + diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 new file mode 100644 index 000000000..5851d3b9b --- /dev/null +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -0,0 +1,170 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster server + +parameters: + + name: + type: string + label: Name + description: Name + + group: + type: string + label: Host Group + description: The Primary Ansible Host Group + default: host + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + + subnet: + type: string + label: Subnet ID + description: Subnet resource + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group resources + + floating_network: + type: string + label: Floating network + description: Network to allocate floating IP from + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. 
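+    # created by the cinder_volume/volume_attachment resources below and attached to the server as /dev/sdb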
+ default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + node_labels: + type: json + description: OpenShift Node Labels + default: {"region": "default" } + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 0 + - addr + + floating_ip: + description: Floating IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 1 + - addr + +resources: + + server: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: { get_resource: port } + user_data: + get_file: user-data + user_data_format: RAW + metadata: + group: { get_param: group } + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + node_labels: { get_param: node_labels } + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } + fixed_ips: + - subnet: { get_param: subnet } + security_groups: { get_param: secgrp } + + floating-ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: floating_network } + port_id: { get_resource: port } + + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server } + mountpoint: /dev/sdb diff --git a/roles/openstack-stack/templates/user_data.j2 b/roles/openstack-stack/templates/user_data.j2 new file mode 100644 index 000000000..eb65f7cec --- /dev/null +++ b/roles/openstack-stack/templates/user_data.j2 @@ -0,0 +1,13 @@ +#cloud-config +disable_root: true + +system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + +write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + content: | + Defaults:openshift !requiretty diff --git a/roles/openstack-stack/test/roles b/roles/openstack-stack/test/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/roles/openstack-stack/test/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml new file mode 100644 index 000000000..0fbf66f34 --- /dev/null +++ b/roles/openstack-stack/test/stack-create-test.yml @@ -0,0 +1,16 @@ +--- +- hosts: localhost + roles: + - role: openstack-stack + stack_name: test-stack + dns_domain: "{{ public_dns_domain }}" + dns_nameservers: "{{ public_dns_nameservers }}" + subnet_prefix: "{{ openstack_subnet_prefix }}" + ssh_public_key: "{{ openstack_ssh_public_key }}" + openstack_image: "{{ openstack_default_image_name }}" + etcd_flavor: "{{ openstack_default_flavor }}" + master_flavor: "{{ openstack_default_flavor }}" + node_flavor: "{{ openstack_default_flavor }}" + infra_flavor: "{{ openstack_default_flavor }}" + dns_flavor: "{{ openstack_default_flavor }}" + external_network: "{{ openstack_external_network_name }}" diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md new file mode 100644 index 000000000..748de282c --- /dev/null +++ 
b/roles/subscription-manager/README.md @@ -0,0 +1,156 @@ +# Red Hat Subscription Manager Ansible Role + +## Parameters + +This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. No values are set by default, which disables this role. The variables are: + +### rhsm_satellite + +Subscription Manager server hostname. If using a Satellite server, set the FQDN here. If using RHSM Hosted, this value must be left blank, none or false. + +Default: none + +### rhsm_username + +Subscription Manager username. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. + +Default: none + +### rhsm_password + +Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. + +NOTE: If this variable is specified on the command-line or set in a variable file it may leave your password exposed. For this reason you may prefer to use an Activation Key if using Satellite. For RHSM Hosted, your password must be specified. There are two ways to provide the password to the Ansible playbook without exposing it to prying eyes. + +1. The first method is to use a **vars_prompt** to collect the password up front, one time, for the playbook. Ansible will not display the password if the prompt is configured as **private** and the task will not display the password on the CLI. This is a good method as it supports automating the task to every host with only one password entry. To enable **vars_prompt** add the following to the very top of your playbook after the **hosts** declaration and before any **pre_tasks** section: + + ``` + - hosts: localhost + # Add the following lines after a -hosts: declaration and before pre_tasks: + # Start of vars_prompt code block + vars_prompt: + - name: "rhsm_password" + prompt: "Subscription Manager password" + confirm: yes + private: yes + # End of vars_prompt code block + pre_tasks: + ``` + +2. A second method is to use an encrypted file via **ansible-vault**. This does not require modifying any code as the previous method does, but it does require more work to create and encrypt the file. To accomplish this, first create a file containing at least the **rhsm_password** variable (it is also possible to specify additional variables to encrypt them all as well): + 1. Create a file to contain the variable such as **secrets.yml**: + + ``` + --- + rhsm_password: "my_secret_password" + # other variables can optionally be placed here as well + ``` + + 2. Encrypt the file with **ansible-vault**: + + ``` + $ ansible-vault encrypt secrets.yml + Vault password: + Confirm Vault password: + Encryption successful + ``` + + 3. When executing **ansible-playbook** specify **--ask-vault-pass** to be prompted for the decryption password, and also specify the location of the **secrets.yml** as such: + + ``` + $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" + ``` + + NOTE: Optionally, the file containing the encrypted variables can be decrypted with **ansible-vault** and the **--ask-vault-pass** option omitted to prevent any password prompting (for automated runs), and the file can be re-encrypted after the run. This can be used if an external system such as Jenkins handles the decryption/encryption outside of Ansible. + +Default: none + +### rhsm_org + +Optional Subscription Manager Satellite Organization. 
Required for Satellite, ignored if using RHSM Hosted. + +Default: none + +### rhsm_activationkey + +Optional Subscription Manager Satellite Activation Key. Use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. + +Default: none + +### rhsm_pool + +Optional Subscription Manager pool; determine this by running **subscription-manager list --available** on a registered system. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** will ignore this option. + +Default: none + +### rhsm_repos + +Optional list of repositories to enable. If left blank it is expected that the **rhsm_activationkey** will specify repos instead. If populated, a **subscription-manager repos --disable=\*** will be run and each of the specified repos explicitly enabled. Valid for RHSM Hosted or Satellite. + +NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such: + +rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server-extras-rpms"]' + +Default: none + +## Calling This Role +Calling this role is done in both the **pre_tasks** and **roles** sections of a playbook, and optionally with a **vars_prompt**. + +### vars_prompt +Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. It also cannot be shown conditionally. For this reason it is not included in this role by default. A better method may be using a file containing the password variable encrypted with **ansible-vault**. See the **rhsm_password** section for more details. + +To add a prompt to capture **rhsm_password**: + +``` +- hosts: localhost + # Add the following lines after a -hosts: declaration and before pre_tasks: + # Start of vars_prompt code block + vars_prompt: + - name: "rhsm_password" + prompt: "Subscription Manager password" + confirm: yes + private: yes + # End of vars_prompt code block + pre_tasks: +``` + +### pre-tasks + +A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks, call the pre_tasks yaml before any roles: + +``` +pre_tasks: +- include: roles/subscription-manager/pre_tasks/pre_tasks.yml +``` + +### roles + +The bulk of the work is performed in the main.yml for this role. The pre-task play will set a variable which can be checked to conditionally include this role as such: + +``` +roles: + - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } +``` + +## Running Playbooks with this Role + +- To register to RHSM Hosted or Satellite with a username and plain text password (NOTE: This may retain your password in your CLI history): + + ``` + $ ansible-playbook --extra-vars="rhsm_username=vvaldez rhsm_password=my_secret_password " + ``` + +- To register to RHSM Hosted or Satellite with a username and an encrypted file containing the password: + + ``` + $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" + + ``` + +- To register to a Satellite server with an activation key: + + ``` + $ ansible-playbook --extra-vars="rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1 " + + ``` +- To ignore any Subscription Manager activities, simply do not set any parameters. 
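+
+- To register to RHSM Hosted and attach a specific pool with an explicit list of repositories (the pool ID below is a placeholder; determine real pool IDs with **subscription-manager list --available**):
+
+  ```
+  $ ansible-playbook --extra-vars="rhsm_username=myusername rhsm_pool=<pool_id>" --extra-vars='{"rhsm_repos": ["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server-extras-rpms"]}'
+  ```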
diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml new file mode 100644 index 000000000..464670fc0 --- /dev/null +++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml @@ -0,0 +1,45 @@ +--- +- name: "Set password fact" + set_fact: + rhsm_password: "{{ rhsm_password | default(None) }}" + no_log: true + +- name: "Initialize Subscription Manager fact" + set_fact: + rhsm_register: true + +- name: "Determine if Subscription Manager should be used" + set_fact: + rhsm_register: false + when: + - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' + - rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '' + - rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '' + - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' + - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' + - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' + +- name: "Validate Subscription Manager organization is set" + fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" + when: + - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' + - rhsm_register + +- name: "Validate Subscription Manager authentication is defined" + fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set. See the README.md for details on securely prompting for a password" + when: + - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') + - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' + - rhsm_register + +- name: "Validate activation key and Hosted are not requested together" + fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'" + when: + - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' + - rhsm_activationkey is defined + - rhsm_activationkey is not none + - rhsm_activationkey|trim != '' + - rhsm_register diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml new file mode 100644 index 000000000..8c1ae697a --- /dev/null +++ b/roles/subscription-manager/tasks/main.yml @@ -0,0 +1,122 @@ +--- +- name: "Initialize rhsm_password variable if vars_prompt was used" + set_fact: + rhsm_password: "{{ hostvars.localhost.rhsm_password }}" + when: + - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' + +- name: "Initializing Subscription Manager authentication method" + set_fact: + rhsm_authentication: false + +# 'rhsm_activationkey' will take precedence even if 'rhsm_username' and 'rhsm_password' are also set +- name: "Setting Subscription Manager Activation Key Fact" + set_fact: + rhsm_authentication: "key" + when: + - rhsm_activationkey is defined + - rhsm_activationkey is not none + - rhsm_activationkey|trim != '' + - not rhsm_authentication + +# If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password +- name: "Setting Subscription Manager Username and Password Fact" + set_fact: + rhsm_authentication: "password" + when: + - rhsm_username is defined + - rhsm_username is not none + - 
rhsm_username|trim != '' + - rhsm_password is defined + - rhsm_password is not none + - rhsm_password|trim != '' + - not rhsm_authentication + +- name: "Initializing registration status" + set_fact: + registered: false + +- name: "Checking subscription status (a failure means it is not registered and will be)" + command: "/usr/bin/subscription-manager status" + ignore_errors: yes + changed_when: no + register: check_if_registered + +- name: "Set registration fact if system is already registered" + set_fact: + registered: true + when: check_if_registered.rc == 0 + +- name: "Cleaning any old subscriptions" + command: "/usr/bin/subscription-manager clean" + when: + - not registered + - rhsm_authentication is defined + +- name: "Install Satellite certificate" + command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" + when: + - not registered + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' + +- name: "Register to Satellite using activation key" + command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org='{{ rhsm_org }}'" + when: + - not registered + - rhsm_authentication == 'key' + - rhsm_satellite is defined + - rhsm_satellite is not none + - rhsm_satellite|trim != '' + +# This can apply to either Hosted or Satellite +- name: "Register using username and password" + command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" + no_log: true + when: + - not registered + - rhsm_authentication == "password" + - rhsm_org is not defined or rhsm_org is none or rhsm_org|trim == '' + +# This can apply to either Hosted or Satellite +- name: "Register using username, password and organization" + command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }} --org={{ rhsm_org }}" + no_log: true + when: + - not registered + - rhsm_authentication == "password" + - rhsm_org is defined + - rhsm_org is not none + - rhsm_org|trim != '' + +- name: "Auto-attach to Subscription Manager Pool" + command: "/usr/bin/subscription-manager attach --auto" + when: + - not registered + - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' + +- name: "Attach to a specific pool" + command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" + when: + - rhsm_pool is defined + - rhsm_pool is not none + - rhsm_pool|trim != '' + - not registered + +- name: "Disable all repositories" + command: "/usr/bin/subscription-manager repos --disable=*" + when: + - not registered + - rhsm_repos is defined + - rhsm_repos is not none + - rhsm_repos|trim != '' + +- name: "Enable specified repositories" + command: "/usr/bin/subscription-manager repos --enable={{ item }}" + with_items: "{{ rhsm_repos }}" + when: + - not registered + - rhsm_repos is defined + - rhsm_repos is not none + - rhsm_repos|trim != '' -- cgit v1.2.3 From 7ba2ef768b25b99f628c88a8e6348a8d0df630a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Fri, 16 Jun 2017 13:25:27 -0400 Subject: Revert "Add an Openstack provider (#397)" (#465) This reverts commit 94756e66352439d48e5d02b461679bd0f1e121cb. 
--- .../provisioning/openstack/INVENTORY-LICENSE.txt | 674 ------------------ playbooks/provisioning/openstack/README.md | 132 ---- .../openstack/openstack_dns_records.yml | 75 -- .../provisioning/openstack/openstack_dns_views.yml | 25 - .../openstack/post-provision-openstack.yml | 58 -- playbooks/provisioning/openstack/pre-install.yml | 14 - playbooks/provisioning/openstack/pre_tasks.yml | 39 -- .../provisioning/openstack/provision-openstack.yml | 49 -- playbooks/provisioning/openstack/provision.yaml | 4 - playbooks/provisioning/openstack/roles | 1 - .../openstack/sample-inventory/ansible.cfg | 19 - .../openstack/sample-inventory/clouds.yaml | 5 - .../sample-inventory/group_vars/OSEv3.yml | 10 - .../openstack/sample-inventory/group_vars/all.yml | 57 -- .../provisioning/openstack/sample-inventory/hosts | 44 -- .../openstack/sample-inventory/openstack.py | 252 ------- roles/dns-server-detect/defaults/main.yml | 3 - roles/dns-server-detect/tasks/main.yml | 36 - roles/hostnames/tasks/main.yaml | 26 - roles/hostnames/test/inv | 12 - roles/hostnames/test/roles | 1 - roles/hostnames/test/test.retry | 3 - roles/hostnames/test/test.yaml | 4 - roles/hostnames/vars/main.yaml | 2 - roles/hostnames/vars/records.yaml | 28 - roles/openshift-prep/tasks/main.yml | 4 - roles/openshift-prep/tasks/prerequisites.yml | 35 - roles/openstack-stack/README.md | 9 - roles/openstack-stack/defaults/main.yml | 12 - roles/openstack-stack/tasks/main.yml | 41 -- roles/openstack-stack/templates/heat_stack.yaml.j2 | 753 --------------------- .../templates/heat_stack_server.yaml.j2 | 170 ----- roles/openstack-stack/templates/user_data.j2 | 13 - roles/openstack-stack/test/roles | 1 - roles/openstack-stack/test/stack-create-test.yml | 16 - roles/subscription-manager/README.md | 156 ----- roles/subscription-manager/pre_tasks/pre_tasks.yml | 45 -- roles/subscription-manager/tasks/main.yml | 122 ---- 38 files changed, 2950 deletions(-) delete mode 100644 playbooks/provisioning/openstack/INVENTORY-LICENSE.txt delete mode 100644 playbooks/provisioning/openstack/README.md delete mode 100644 playbooks/provisioning/openstack/openstack_dns_records.yml delete mode 100644 playbooks/provisioning/openstack/openstack_dns_views.yml delete mode 100644 playbooks/provisioning/openstack/post-provision-openstack.yml delete mode 100644 playbooks/provisioning/openstack/pre-install.yml delete mode 100644 playbooks/provisioning/openstack/pre_tasks.yml delete mode 100644 playbooks/provisioning/openstack/provision-openstack.yml delete mode 100644 playbooks/provisioning/openstack/provision.yaml delete mode 120000 playbooks/provisioning/openstack/roles delete mode 100644 playbooks/provisioning/openstack/sample-inventory/ansible.cfg delete mode 100644 playbooks/provisioning/openstack/sample-inventory/clouds.yaml delete mode 100644 playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml delete mode 100644 playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml delete mode 100644 playbooks/provisioning/openstack/sample-inventory/hosts delete mode 100755 playbooks/provisioning/openstack/sample-inventory/openstack.py delete mode 100644 roles/dns-server-detect/defaults/main.yml delete mode 100644 roles/dns-server-detect/tasks/main.yml delete mode 100644 roles/hostnames/tasks/main.yaml delete mode 100644 roles/hostnames/test/inv delete mode 120000 roles/hostnames/test/roles delete mode 100644 roles/hostnames/test/test.retry delete mode 100644 roles/hostnames/test/test.yaml delete mode 100644 roles/hostnames/vars/main.yaml delete 
mode 100644 roles/hostnames/vars/records.yaml delete mode 100644 roles/openshift-prep/tasks/main.yml delete mode 100644 roles/openshift-prep/tasks/prerequisites.yml delete mode 100644 roles/openstack-stack/README.md delete mode 100644 roles/openstack-stack/defaults/main.yml delete mode 100644 roles/openstack-stack/tasks/main.yml delete mode 100644 roles/openstack-stack/templates/heat_stack.yaml.j2 delete mode 100644 roles/openstack-stack/templates/heat_stack_server.yaml.j2 delete mode 100644 roles/openstack-stack/templates/user_data.j2 delete mode 120000 roles/openstack-stack/test/roles delete mode 100644 roles/openstack-stack/test/stack-create-test.yml delete mode 100644 roles/subscription-manager/README.md delete mode 100644 roles/subscription-manager/pre_tasks/pre_tasks.yml delete mode 100644 roles/subscription-manager/tasks/main.yml diff --git a/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt b/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt deleted file mode 100644 index 94a9ed024..000000000 --- a/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. 
- - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. 
- - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
- - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md deleted file mode 100644 index 423d57113..000000000 --- a/playbooks/provisioning/openstack/README.md +++ /dev/null @@ -1,132 +0,0 @@ -# OpenStack Provisioning - -This repository contains playbooks and Heat templates to provision -OpenStack resources (servers, networking, volumes, security groups, -etc.). The result is an environment ready for openshift-ansible. 
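For orientation, the top-level entry point being removed here, `playbooks/provisioning/openstack/provision.yaml` (its full content appears further down in this patch), is nothing more than two includes: provision the OpenStack resources, then run the pre-install host preparation:

```yaml
---
- include: "provision-openstack.yml"

- include: "pre-install.yml"
```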
-
-
-## Dependencies
-
-* [Ansible 2.3](https://pypi.python.org/pypi/ansible)
-* [shade](https://pypi.python.org/pypi/shade)
-* python-dns
-
-
-## What does it do
-
-* Create Nova servers with floating IP addresses attached
-* Assign Cinder volumes to the servers
-* Set up an `openshift` user with sudo privileges
-* Optionally attach Red Hat subscriptions
-* Set up a bind-based DNS server
-* When deploying more than one master, set up an HAProxy server
-
-
-## Set up
-
-### Copy the sample inventory
-
-    cp -r openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory inventory
-
-### Copy clouds.yaml
-
-    cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/clouds.yaml clouds.yaml
-
-### Copy ansible config
-
-    cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/ansible.cfg ansible.cfg
-
-### Update `inventory/group_vars/all.yml`
-
-Pay special attention to the values in the first paragraph -- these
-will depend on your OpenStack environment.
-
-The `env_id` and `openstack_dns_domain` will form the DNS domain all
-your servers will be under. With the default values, this will be
-`openshift.example.com`.
-
-`openstack_nameservers` is a list of DNS servers accessible from all
-the created Nova servers. These will serve as your DNS forwarders.
-
-`openstack_ssh_key` is a Nova keypair -- you can see your keypairs with
-`openstack keypair list`.
-
-`openstack_default_image_name` is the name of the Glance image the
-servers will use. You can see your images with `openstack image list`.
-
-`openstack_default_flavor` is the Nova flavor the servers will use.
-You can see your flavors with `openstack flavor list`.
-
-`openstack_external_network_name` is the name of the Neutron network
-providing external connectivity. It is often called `public`,
-`external` or `ext-net`. You can see your networks with
-`openstack network list`.
-
-The `openstack_num_masters`, `openstack_num_infra` and
-`openstack_num_nodes` values specify the number of Master, Infra and
-App nodes to create.
-
-`openstack_flat_secgrp` controls Neutron security group creation for the
-Heat stacks. Set it to true if you experience issues with security group
-rule quotas. It trades security for a smaller number of rules by sharing
-the same set of firewall rules across the master, node, etcd and infra nodes.
-
-### Update the DNS names in `inventory/hosts`
-
-The different server groups are currently grouped by domain name, so
-if you end up using a different domain than `openshift.example.com`,
-you will need to update the `inventory/hosts` file.
-
-For example, if your final domain is `my.cloud.com`, you can run this
-command to update the `hosts` file:
-
-    sed -i 's/openshift.example.com/my.cloud.com/' inventory/hosts
-
-### Configure the OpenShift parameters
-
-Finally, you need to update the DNS entry in
-`inventory/group_vars/OSEv3.yml` (look at
-`openshift_master_default_subdomain`).
-
-In addition, this is the place where you can customise your OpenShift
-installation, for example by specifying the authentication method.
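As a minimal sketch of such a customisation, assuming the stock openshift-ansible variable names of this era, an htpasswd-based login could be declared in `inventory/group_vars/OSEv3.yml` roughly as follows; the `openshift_master_identity_providers` syntax shown is illustrative and should be checked against the sample inventory linked next:

```yaml
# Illustrative only: enable a simple htpasswd identity provider.
# Verify the exact variable names and format against the openshift-ansible
# sample inventory referenced below before using this.
openshift_master_identity_providers:
  - name: htpasswd_auth
    login: true
    challenge: true
    kind: HTPasswdPasswordIdentityProvider
    filename: /etc/origin/master/htpasswd
```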
- -The full list of options is available in this sample inventory: - -https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example - -Note, that in order to deploy OpenShift origin, you should update the following -variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`: - - deployment_type: origin - origin_release: 1.5.1 - openshift_deployment_type: "{{ deployment_type }}" - -## Deployment - -### Run the playbook - -Assuming your OpenStack (Keystone) credentials are in the `keystonerc` -file, this is how you stat the provisioning process: - - . keystonerc - ansible-playbook -i inventory --timeout 30 --private-key ~/.ssh/openshift openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml - -### Install OpenShift - -Once it succeeds, you can install openshift by running: - - ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/openshift-node/network_manager.yml - ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml - -Note, the `network_manager.yml` is only required if you're deploying OpenShift -origin. - -## License - -As the rest of the openshift-ansible-contrib repository, the code here is -licensed under Apache 2. However, the openstack.py file under -`sample-inventory` is GPLv3+. See the INVENTORY-LICENSE.txt file for the full -text of the license. diff --git a/playbooks/provisioning/openstack/openstack_dns_records.yml b/playbooks/provisioning/openstack/openstack_dns_records.yml deleted file mode 100644 index b32b70ba9..000000000 --- a/playbooks/provisioning/openstack/openstack_dns_records.yml +++ /dev/null @@ -1,75 +0,0 @@ ---- -- name: "Generate list of private A records" - set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['openstack']['private_v4'] } ] }}" - with_items: "{{ groups['cluster_hosts'] }}" - -- name: "Set the private DNS server to use the external value (if provided)" - set_fact: - nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" - nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" - nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" - when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['private'] is defined - -- name: "Set the private DNS server to use the provisioned value" - set_fact: - nsupdate_server_private: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" - nsupdate_key_secret_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_secret }}" - nsupdate_key_algorithm_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_algorithm }}" - when: - - nsupdate_server_private is undefined - -- name: "Generate the private Add section for DNS" - set_fact: - private_named_records: - - view: "private" - zone: "{{ full_dns_domain }}" - server: "{{ nsupdate_server_private }}" - key_name: "{{ ( 'private-' + full_dns_domain ) }}" - key_secret: "{{ nsupdate_key_secret_private }}" - key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" - entries: "{{ private_records }}" - -- name: "Generate list of public A records" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['openstack']['public_v4'] 
} ] }}" - with_items: "{{ groups['cluster_hosts'] }}" - -- name: "Add wildcard records to the public A records" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['openstack']['public_v4'] } ] }}" - with_items: "{{ groups['infra_hosts'] }}" - -- name: "Set the public DNS server details to use the external value (if provided)" - set_fact: - nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" - nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" - nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" - when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['public'] is defined - -- name: "Set the public DNS server details to use the provisioned value" - set_fact: - nsupdate_server_public: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" - nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" - nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" - when: - - nsupdate_server_public is undefined - -- name: "Generate the public Add section for DNS" - set_fact: - public_named_records: - - view: "public" - zone: "{{ full_dns_domain }}" - server: "{{ nsupdate_server_public }}" - key_name: "{{ ( 'public-' + full_dns_domain ) }}" - key_secret: "{{ nsupdate_key_secret_public }}" - key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" - entries: "{{ public_records }}" - -- name: "Generate the final dns_records_add" - set_fact: - dns_records_add: "{{ private_named_records + public_named_records }}" diff --git a/playbooks/provisioning/openstack/openstack_dns_views.yml b/playbooks/provisioning/openstack/openstack_dns_views.yml deleted file mode 100644 index ea0a7cb96..000000000 --- a/playbooks/provisioning/openstack/openstack_dns_views.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: "Generate ACL list for DNS server" - set_fact: - acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['openstack']['private_v4'] + '/32') ] }}" - with_items: "{{ groups['cluster_hosts'] }}" - -- name: "Generate the private view" - set_fact: - private_named_view: - - name: "private" - acl_entry: "{{ acl_list }}" - zone: - - dns_domain: "{{ full_dns_domain }}" - -- name: "Generate the public view" - set_fact: - public_named_view: - - name: "public" - zone: - - dns_domain: "{{ full_dns_domain }}" - forwarder: "{{ public_dns_nameservers }}" - -- name: "Generate the final named_config_views" - set_fact: - named_config_views: "{{ private_named_view + public_named_view }}" diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml deleted file mode 100644 index 4e42c1c7f..000000000 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -# Assign hostnames -- hosts: cluster_hosts - become: true - pre_tasks: - - include: pre_tasks.yml - roles: - - role: hostnames - -# Subscribe DNS Host to allow for configuration below -- hosts: dns - become: true - roles: - - role: subscription-manager - when: hostvars.localhost.rhsm_register - tags: 'subscription-manager' - -# Determine which DNS server(s) to use for our generated records -- hosts: localhost - roles: - - dns-server-detect - -# Build the DNS Server Views and Configure DNS Server(s) -- hosts: dns - become: true - 
pre_tasks: - - include: pre_tasks.yml - - name: "Generate dns-server views" - include: openstack_dns_views.yml - roles: - - role: dns-server - -# Build and process DNS Records -- hosts: localhost - pre_tasks: - - include: pre_tasks.yml - - name: "Generate dns records" - include: openstack_dns_records.yml - roles: - - role: dns - -# OpenShift Pre-Requisites -- hosts: OSEv3 - become: true - tasks: - - name: "Edit /etc/resolv.conf on masters/nodes" - lineinfile: - state: present - dest: /etc/resolv.conf - regexp: "nameserver {{ hostvars['localhost'].private_dns_server }}" - line: "nameserver {{ hostvars['localhost'].private_dns_server }}" - insertafter: search* - - name: "Include DNS configuration to ensure proper name resolution" - lineinfile: - state: present - dest: /etc/sysconfig/network - regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" - line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" diff --git a/playbooks/provisioning/openstack/pre-install.yml b/playbooks/provisioning/openstack/pre-install.yml deleted file mode 100644 index 629182d49..000000000 --- a/playbooks/provisioning/openstack/pre-install.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -############################### -# OpenShift Pre-Requisites - -# - subscribe hosts -# - prepare docker -# - other prep (install additional packages, etc.) -# -- hosts: OSEv3 - become: true - roles: - - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager', ansible_sudo: true } - - { role: docker, tags: 'docker' } - - { role: openshift-prep, tags: 'openshift-prep' } diff --git a/playbooks/provisioning/openstack/pre_tasks.yml b/playbooks/provisioning/openstack/pre_tasks.yml deleted file mode 100644 index a4ff7c4ac..000000000 --- a/playbooks/provisioning/openstack/pre_tasks.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- name: Generate Environment ID - set_fact: - env_random_id: "{{ ansible_date_time.epoch }}" - run_once: true - delegate_to: localhost - -- name: Set default Environment ID - set_fact: - default_env_id: "casl-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" - delegate_to: localhost - -- name: Setting Common Facts - set_fact: - env_id: "{{ env_id | default(default_env_id) }}" - delegate_to: localhost - -- name: Set Dynamic Inventory Filters - become: false - shell: > - export OS_INV_FILTER_KEY=clusterid && export OS_INV_FILTER_VALUE={{ env_id }} - delegate_to: localhost - -- name: Updating DNS domain to include env_id (if not empty) - set_fact: - full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' 
+ public_dns_domain) }}" - delegate_to: localhost - -- name: Set the APP domain for OpenShift use - set_fact: - openshift_app_domain: "{{ openshift_app_domain | default('apps') }}" - delegate_to: localhost - -- name: Set the default app domain for routing purposes - set_fact: - openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" - delegate_to: localhost - when: - - openshift_master_default_subdomain is undefined diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml deleted file mode 100644 index c7ad782c9..000000000 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -- hosts: localhost - gather_facts: True - pre_tasks: - - include: pre_tasks.yml - roles: - - role: openstack-stack - stack_name: "{{ env_id }}.{{ public_dns_domain }}" - dns_domain: "{{ public_dns_domain }}" - dns_nameservers: "{{ public_dns_nameservers }}" - subnet_prefix: "{{ openstack_subnet_prefix }}" - ssh_public_key: "{{ openstack_ssh_public_key }}" - openstack_image: "{{ openstack_default_image_name }}" - lb_flavor: "{{ openstack_default_flavor | default('m1.small') }}" - etcd_flavor: "{{ openstack_default_flavor | default('m1.small') }}" - master_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" - node_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" - infra_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" - dns_flavor: "{{ openstack_default_flavor | default('m1.small') }}" - external_network: "{{ openstack_external_network_name }}" - num_etcd: "{{ openstack_num_etcd | default(0) }}" - num_masters: "{{ openstack_num_masters }}" - num_nodes: "{{ openstack_num_nodes }}" - num_infra: "{{ openstack_num_infra }}" - num_dns: "{{ openstack_num_dns | default(1) }}" - master_volume_size: "{{ docker_volume_size }}" - app_volume_size: "{{ docker_volume_size }}" - infra_volume_size: "{{ docker_volume_size }}" - - -- name: Refresh Server inventory - hosts: localhost - connection: local - gather_facts: False - tasks: - - meta: refresh_inventory - -- hosts: cluster_hosts - gather_facts: false - tasks: - - name: Debug hostvar - debug: - msg: "{{ hostvars[inventory_hostname] }}" - verbosity: 2 - - name: waiting for server to come back - local_action: wait_for host={{ hostvars[inventory_hostname]['ansible_ssh_host'] }} port=22 delay=30 timeout=300 - become: false - -- include: post-provision-openstack.yml diff --git a/playbooks/provisioning/openstack/provision.yaml b/playbooks/provisioning/openstack/provision.yaml deleted file mode 100644 index 7cde5e8b8..000000000 --- a/playbooks/provisioning/openstack/provision.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: "provision-openstack.yml" - -- include: "pre-install.yml" diff --git a/playbooks/provisioning/openstack/roles b/playbooks/provisioning/openstack/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/provisioning/openstack/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg deleted file mode 100644 index a701e59ac..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg +++ /dev/null @@ -1,19 +0,0 @@ -# config file for ansible -- http://ansible.com/ -# ============================================== -[defaults] -forks = 50 -# work around privilege escalation timeouts in 
ansible -timeout = 30 -host_key_checking = false -inventory = inventory -inventory_ignore_extensions = secrets.py, .pyc -gathering = smart -retry_files_enabled = false -fact_caching = jsonfile -fact_caching_connection = .ansible/cached_facts -fact_caching_timeout = 900 - -[ssh_connection] -ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no -control_path = /var/tmp/%%h-%%r -pipelining = True diff --git a/playbooks/provisioning/openstack/sample-inventory/clouds.yaml b/playbooks/provisioning/openstack/sample-inventory/clouds.yaml deleted file mode 100644 index 8182d2995..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/clouds.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -ansible: - use_hostnames: True - expand_hostvars: True - fail_on_errors: True diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml deleted file mode 100644 index 32ec43387..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -openshift_deployment_type: openshift-enterprise -openshift_release: v3.5 -openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" - -# NOTE(shadower): do not remove this line, otherwise the default node labels -# won't be set up. -openshift_node_labels: "{{ openstack.metadata.node_labels }}" - -osm_default_node_selector: 'region=primary' diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml deleted file mode 100644 index 047923253..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -env_id: "openshift" -public_dns_domain: "example.com" -public_dns_nameservers: [] - -openstack_ssh_public_key: "openshift" -openstack_default_image_name: "rhel73" -openstack_default_flavor: "m1.medium" -openstack_external_network_name: "public" - -openstack_num_masters: 1 -openstack_num_infra: 1 -openstack_num_nodes: 2 - -docker_volume_size: "15" - -openstack_subnet_prefix: "192.168.99" - -# # Red Hat subscription -# # Using Red Hat Satellite: -# rhsm_register: True -# rhsm_satellite: 'sat-6.example.com' -# rhsm_org: 'OPENSHIFT_ORG' -# rhsm_activationkey: '' - -# # Or using RHN username, password and optionally pool: -# rhsm_register: True -# rhsm_username: '' -# rhsm_password: '' -# rhsm_pool: '' - -# rhsm_repos: -# - "rhel-7-server-rpms" -# - "rhel-7-server-ose-3.5-rpms" -# - "rhel-7-server-extras-rpms" -# - "rhel-7-fast-datapath-rpms" - - -# # Roll-your-own DNS -# openstack_num_dns: 0 -# external_nsupdate_keys: -# public: -# key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg==' -# key_algorithm: 'hmac-md5' -# server: '192.168.1.1' -# private: -# key_secret: 'kVE2bVTgZjrdJipxPhID8BEZmbHD8cExlVPR+zbFpW6la8kL5wpXiwOh8q5AAosXQI5t95UXwq3Inx8QT58duw==' -# key_algorithm: 'hmac-md5' -# server: '192.168.1.2' - - -# NOTE(shadower): Do not change this value. The Ansible user is currently -# hardcoded to `openshift`. 
-ansible_user: openshift - -# Use a single security group for a cluster -openstack_flat_secgrp: false diff --git a/playbooks/provisioning/openstack/sample-inventory/hosts b/playbooks/provisioning/openstack/sample-inventory/hosts deleted file mode 100644 index 5f73b60f6..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/hosts +++ /dev/null @@ -1,44 +0,0 @@ -#[all:vars] -# For all group_vars, see ./group_vars/all.yml - -# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups. -# The lb group lets Ansible configure HAProxy as the load balancing solution. -# Comment lb out if your load balancer is pre-configured. -[cluster_hosts:children] -OSEv3 -dns - -[OSEv3:children] -masters -nodes -etcd - -# Set variables common for all OSEv3 hosts -#[OSEv3:vars] - -# For OSEv3 normal group vars, see ./group_vars/OSEv3.yml - -# Host Groups - -[masters:children] -masters.openshift.example.com - -[etcd:children] -etcd.openshift.example.com - -[nodes:children] -masters -infra.openshift.example.com -nodes.openshift.example.com - -[infra_hosts:children] -infra.openshift.example.com - -[dns:children] -dns.openshift.example.com - -[masters.openshift.example.com] -[etcd.openshift.example.com] -[infra.openshift.example.com] -[nodes.openshift.example.com] -[dns.openshift.example.com] diff --git a/playbooks/provisioning/openstack/sample-inventory/openstack.py b/playbooks/provisioning/openstack/sample-inventory/openstack.py deleted file mode 100755 index 8de73e1e0..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/openstack.py +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2012, Marco Vito Moscaritolo -# Copyright (c) 2013, Jesse Keating -# Copyright (c) 2015, Hewlett-Packard Development Company, L.P. -# Copyright (c) 2016, Rackspace Australia -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -# The OpenStack Inventory module uses os-client-config for configuration. -# https://github.com/stackforge/os-client-config -# This means it will either: -# - Respect normal OS_* environment variables like other OpenStack tools -# - Read values from a clouds.yaml file. -# If you want to configure via clouds.yaml, you can put the file in: -# - Current directory -# - ~/.config/openstack/clouds.yaml -# - /etc/openstack/clouds.yaml -# - /etc/ansible/openstack.yml -# The clouds.yaml file can contain entries for multiple clouds and multiple -# regions of those clouds. If it does, this inventory module will connect to -# all of them and present them as one contiguous inventory. -# -# See the adjacent openstack.yml file for an example config file -# There are two ansible inventory specific options that can be set in -# the inventory section. 
-# expand_hostvars controls whether or not the inventory will make extra API -# calls to fill out additional information about each server -# use_hostnames changes the behavior from registering every host with its UUID -# and making a group of its hostname to only doing this if the -# hostname in question has more than one server -# fail_on_errors causes the inventory to fail and return no hosts if one cloud -# has failed (for example, bad credentials or being offline). -# When set to False, the inventory will return hosts from -# whichever other clouds it can contact. (Default: True) - -import argparse -import collections -import os -import sys -import time -from distutils.version import StrictVersion - -try: - import json -except ImportError: - import simplejson as json - -import os_client_config -import shade -import shade.inventory - -CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml'] - - -def get_groups_from_server(server_vars, namegroup=True): - groups = [] - - region = server_vars['region'] - cloud = server_vars['cloud'] - metadata = server_vars.get('metadata', {}) - - # Create a group for the cloud - groups.append(cloud) - - # Create a group on region - groups.append(region) - - # And one by cloud_region - groups.append("%s_%s" % (cloud, region)) - - # Check if group metadata key in servers' metadata - if 'group' in metadata: - groups.append(metadata['group']) - - for extra_group in metadata.get('groups', '').split(','): - if extra_group: - groups.append(extra_group.strip()) - - groups.append('instance-%s' % server_vars['id']) - if namegroup: - groups.append(server_vars['name']) - - for key in ('flavor', 'image'): - if 'name' in server_vars[key]: - groups.append('%s-%s' % (key, server_vars[key]['name'])) - - for key, value in iter(metadata.items()): - groups.append('meta-%s_%s' % (key, value)) - - az = server_vars.get('az', None) - if az: - # Make groups for az, region_az and cloud_region_az - groups.append(az) - groups.append('%s_%s' % (region, az)) - groups.append('%s_%s_%s' % (cloud, region, az)) - return groups - - -def get_host_groups(inventory, refresh=False): - (cache_file, cache_expiration_time) = get_cache_settings() - if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): - groups = to_json(get_host_groups_from_cloud(inventory)) - open(cache_file, 'w').write(groups) - else: - groups = open(cache_file, 'r').read() - return groups - - -def append_hostvars(hostvars, groups, key, server, namegroup=False): - hostvars[key] = dict( - ansible_ssh_host=server['interface_ip'], - openshift_hostname=server['name'], - openshift_public_hostname=server['name'], - openstack=server) - for group in get_groups_from_server(server, namegroup=namegroup): - groups[group].append(key) - - -def get_host_groups_from_cloud(inventory): - groups = collections.defaultdict(list) - firstpass = collections.defaultdict(list) - hostvars = {} - list_args = {} - if hasattr(inventory, 'extra_config'): - use_hostnames = inventory.extra_config['use_hostnames'] - list_args['expand'] = inventory.extra_config['expand_hostvars'] - if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"): - list_args['fail_on_cloud_config'] = \ - inventory.extra_config['fail_on_errors'] - else: - use_hostnames = False - - for server in inventory.list_hosts(**list_args): - - if 'interface_ip' not in server: - continue - try: - if server["metadata"][os.environ['OS_INV_FILTER_KEY']] == os.environ['OS_INV_FILTER_VALUE']: - firstpass[server['name']].append(server) - except Exception: - 
firstpass[server['name']].append(server) - for name, servers in firstpass.items(): - if len(servers) == 1 and use_hostnames: - append_hostvars(hostvars, groups, name, servers[0]) - else: - server_ids = set() - # Trap for duplicate results - for server in servers: - server_ids.add(server['id']) - if len(server_ids) == 1 and use_hostnames: - append_hostvars(hostvars, groups, name, servers[0]) - else: - for server in servers: - append_hostvars( - hostvars, groups, server['id'], server, - namegroup=True) - groups['_meta'] = {'hostvars': hostvars} - return groups - - -def is_cache_stale(cache_file, cache_expiration_time, refresh=False): - ''' Determines if cache file has expired, or if it is still valid ''' - if refresh: - return True - if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0: - mod_time = os.path.getmtime(cache_file) - current_time = time.time() - if (mod_time + cache_expiration_time) > current_time: - return False - return True - - -def get_cache_settings(): - config = os_client_config.config.OpenStackConfig( - config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES) - # For inventory-wide caching - cache_expiration_time = config.get_cache_expiration_time() - cache_path = config.get_cache_path() - if not os.path.exists(cache_path): - os.makedirs(cache_path) - cache_file = os.path.join(cache_path, 'ansible-inventory.cache') - return (cache_file, cache_expiration_time) - - -def to_json(in_dict): - return json.dumps(in_dict, sort_keys=True, indent=2) - - -def parse_args(): - parser = argparse.ArgumentParser(description='OpenStack Inventory Module') - parser.add_argument('--private', - action='store_true', - help='Use private address for ansible host') - parser.add_argument('--refresh', action='store_true', - help='Refresh cached information') - parser.add_argument('--debug', action='store_true', default=False, - help='Enable debug output') - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('--list', action='store_true', - help='List active servers') - group.add_argument('--host', help='List details about the specific host') - - return parser.parse_args() - - -def main(): - args = parse_args() - try: - config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES - shade.simple_logging(debug=args.debug) - inventory_args = dict( - refresh=args.refresh, - config_files=config_files, - private=args.private, - ) - if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): - inventory_args.update(dict( - config_key='ansible', - config_defaults={ - 'use_hostnames': False, - 'expand_hostvars': True, - 'fail_on_errors': True, - } - )) - - inventory = shade.inventory.OpenStackInventory(**inventory_args) - - if args.list: - output = get_host_groups(inventory, refresh=args.refresh) - elif args.host: - output = to_json(inventory.get_host(args.host)) - print(output) - except shade.OpenStackCloudException as e: - sys.stderr.write('%s\n' % str(e)) - sys.exit(1) - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/roles/dns-server-detect/defaults/main.yml b/roles/dns-server-detect/defaults/main.yml deleted file mode 100644 index 58bd861cd..000000000 --- a/roles/dns-server-detect/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- - -external_nsupdate_keys: {} diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml deleted file mode 100644 index 183c0a0ca..000000000 --- a/roles/dns-server-detect/tasks/main.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -- fail: - msg: 'Missing required private 
DNS server(s)' - when: - - external_nsupdate_keys['private'] is undefined - - hostvars[groups['dns'][0]] is undefined - -- fail: - msg: 'Missing required public DNS server(s)' - when: - - external_nsupdate_keys['public'] is undefined - - hostvars[groups['dns'][0]] is undefined - -- name: "Set the private DNS server to use the external value (if provided)" - set_fact: - private_dns_server: "{{ external_nsupdate_keys['private']['server'] }}" - when: - - external_nsupdate_keys['private'] is defined - -- name: "Set the private DNS server to use the provisioned value" - set_fact: - private_dns_server: "{{ hostvars[groups['dns'][0]].openstack.private_v4 }}" - when: - - private_dns_server is undefined - -- name: "Set the public DNS server to use the external value (if provided)" - set_fact: - public_dns_server: "{{ external_nsupdate_keys['public']['server'] }}" - when: - - external_nsupdate_keys['public'] is defined - -- name: "Set the public DNS server to use the provisioned value" - set_fact: - public_dns_server: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" - when: - - public_dns_server is undefined diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml deleted file mode 100644 index c49852210..000000000 --- a/roles/hostnames/tasks/main.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Setting Hostname Fact - set_fact: - new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" - -- name: Setting FQDN Fact - set_fact: - new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" - -- name: Setting hostname and DNS domain - hostname: name="{{ new_fqdn }}" - -- name: Check for cloud.cfg - stat: path=/etc/cloud/cloud.cfg - register: cloud_cfg - -- name: Prevent cloud-init updates of hostname/fqdn (if applicable) - lineinfile: - dest: /etc/cloud/cloud.cfg - state: present - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - with_items: - - { regexp: '^ - set_hostname', line: '# - set_hostname' } - - { regexp: '^ - update_hostname', line: '# - update_hostname' } - when: cloud_cfg.stat.exists == True diff --git a/roles/hostnames/test/inv b/roles/hostnames/test/inv deleted file mode 100644 index ffbe6e03d..000000000 --- a/roles/hostnames/test/inv +++ /dev/null @@ -1,12 +0,0 @@ -[all:vars] -dns_domain=example.com - -[openshift_masters] -192.168.124.41 dns_private_ip=1.1.1.41 dns_public_ip=192.168.124.41 -192.168.124.117 dns_private_ip=1.1.1.117 dns_public_ip=192.168.124.117 - -[openshift_nodes] -192.168.124.40 dns_private_ip=1.1.1.40 dns_public_ip=192.168.124.40 - -#[dns] -#192.168.124.117 dns_private_ip=1.1.1.117 diff --git a/roles/hostnames/test/roles b/roles/hostnames/test/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/roles/hostnames/test/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/roles/hostnames/test/test.retry b/roles/hostnames/test/test.retry deleted file mode 100644 index 63fc08e4c..000000000 --- a/roles/hostnames/test/test.retry +++ /dev/null @@ -1,3 +0,0 @@ -192.168.124.117 -192.168.124.40 -192.168.124.41 diff --git a/roles/hostnames/test/test.yaml b/roles/hostnames/test/test.yaml deleted file mode 100644 index 0c56aea51..000000000 --- a/roles/hostnames/test/test.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- hosts: all - roles: - - role: hostnames diff --git a/roles/hostnames/vars/main.yaml b/roles/hostnames/vars/main.yaml deleted file mode 100644 index 3eecb8dc4..000000000 --- a/roles/hostnames/vars/main.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -counter: 1 diff --git 
a/roles/hostnames/vars/records.yaml b/roles/hostnames/vars/records.yaml deleted file mode 100644 index 0cadc8181..000000000 --- a/roles/hostnames/vars/records.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: "Building Records" - set_fact: - dns_records_add: - - view: private - zone: example.com - entries: - - type: A - hostname: master1.example.com - ip: 172.16.15.94 - - type: A - hostname: node1.example.com - ip: 172.16.15.86 - - type: A - hostname: node2.example.com - ip: 172.16.15.87 - - view: public - zone: example.com - entries: - - type: A - hostname: master1.example.com - ip: 10.3.10.116 - - type: A - hostname: node1.example.com - ip: 10.3.11.46 - - type: A - hostname: node2.example.com - ip: 10.3.12.6 diff --git a/roles/openshift-prep/tasks/main.yml b/roles/openshift-prep/tasks/main.yml deleted file mode 100644 index 5e484e75f..000000000 --- a/roles/openshift-prep/tasks/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -# Starting Point for OpenShift Installation and Configuration -- include: prerequisites.yml - tags: [prerequisites] diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml deleted file mode 100644 index 60507636f..000000000 --- a/roles/openshift-prep/tasks/prerequisites.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- name: "Cleaning yum repositories" - command: "yum clean all" - -- name: "Install required packages" - yum: - name: "{{ item }}" - state: latest - with_items: - - wget - - git - - net-tools - - bind-utils - - bridge-utils - - bash-completion - - vim-enhanced - -- name: "Update all packages (this can take a very long time)" - yum: - name: "*" - state: latest - -- name: "Verify hostname" - shell: hostnamectl status | awk "/Static hostname/"'{ print $3 }' - register: hostname_fqdn - -- name: "Set hostname if required" - hostname: - name: "{{ ansible_fqdn }}" - when: hostname_fqdn.stdout != ansible_fqdn - -- name: "Verify SELinux is enforcing" - fail: - msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" - when: ansible_selinux.config_mode != "enforcing" diff --git a/roles/openstack-stack/README.md b/roles/openstack-stack/README.md deleted file mode 100644 index 509c9de6c..000000000 --- a/roles/openstack-stack/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Role openstack-stack - -Role for spinning up instances using OpenStack Heat. 
- -## To Test - -``` -ansible-playbook casl-ansible/roles/openstack-stack/test/stack-create-test.yml -``` diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml deleted file mode 100644 index 2a4ef3a45..000000000 --- a/roles/openstack-stack/defaults/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -dns_volume_size: 1 -ssh_ingress_cidr: 0.0.0.0/0 -node_ingress_cidr: 0.0.0.0/0 -master_ingress_cidr: 0.0.0.0/0 -lb_ingress_cidr: 0.0.0.0/0 -num_etcd: 0 -num_masters: 1 -num_nodes: 1 -num_dns: 1 -num_infra: 1 -etcd_volume_size: 2 diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml deleted file mode 100644 index 71c7bbe0d..000000000 --- a/roles/openstack-stack/tasks/main.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -- name: create HOT stack template prefix - register: stack_template_pre - tempfile: - state: directory - prefix: casl-ansible - -- name: set template paths - set_fact: - stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" - server_template_path: "{{ stack_template_pre.path }}/server.yaml" - user_data_template_path: "{{ stack_template_pre.path }}/user-data" - -- name: generate HOT stack template from jinja2 template - template: - src: heat_stack.yaml.j2 - dest: "{{ stack_template_path }}" - -- name: generate HOT server template from jinja2 template - template: - src: heat_stack_server.yaml.j2 - dest: "{{ server_template_path }}" - -- name: generate user_data from jinja2 template - template: - src: user_data.j2 - dest: "{{ user_data_template_path }}" - -- name: create stack - ignore_errors: False - register: stack_create - os_stack: - name: "{{ stack_name }}" - state: present - template: "{{ stack_template_path }}" - wait: yes - -- name: cleanup temp files - file: - path: "{{ stack_template_pre.path }}" - state: absent diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 deleted file mode 100644 index c750865a5..000000000 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ /dev/null @@ -1,753 +0,0 @@ -heat_template_version: 2016-10-14 - -description: OpenShift cluster - -parameters: - -outputs: - - etcd_names: - description: Name of the etcds - value: { get_attr: [ etcd, name ] } - - etcd_ips: - description: IPs of the etcds - value: { get_attr: [ etcd, private_ip ] } - - etcd_floating_ips: - description: Floating IPs of the etcds - value: { get_attr: [ etcd, floating_ip ] } - - master_names: - description: Name of the masters - value: { get_attr: [ masters, name ] } - - master_ips: - description: IPs of the masters - value: { get_attr: [ masters, private_ip ] } - - master_floating_ips: - description: Floating IPs of the masters - value: { get_attr: [ masters, floating_ip ] } - - node_names: - description: Name of the nodes - value: { get_attr: [ compute_nodes, name ] } - - node_ips: - description: IPs of the nodes - value: { get_attr: [ compute_nodes, private_ip ] } - - node_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ compute_nodes, floating_ip ] } - - infra_names: - description: Name of the nodes - value: { get_attr: [ infra_nodes, name ] } - - infra_ips: - description: IPs of the nodes - value: { get_attr: [ infra_nodes, private_ip ] } - - infra_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ infra_nodes, floating_ip ] } - - dns_name: - description: Name of the DNS - value: - get_attr: - - dns - - name - - dns_floating_ip: - description: Floating IP of the DNS - value: - 
get_attr: - - dns - - addresses - - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - - 1 - - addr - -resources: - - net: - type: OS::Neutron::Net - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - - subnet: - type: OS::Neutron::Subnet - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-subnet - params: - cluster_id: {{ stack_name }} - network: { get_resource: net } - cidr: - str_replace: - template: subnet_24_prefix.0/24 - params: - subnet_24_prefix: {{ subnet_prefix }} - allocation_pools: - - start: - str_replace: - template: subnet_24_prefix.3 - params: - subnet_24_prefix: {{ subnet_prefix }} - end: - str_replace: - template: subnet_24_prefix.254 - params: - subnet_24_prefix: {{ subnet_prefix }} - dns_nameservers: - {% for nameserver in dns_nameservers %} - - {{ nameserver }} - {% endfor %} - - router: - type: OS::Neutron::Router - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-router - params: - cluster_id: {{ stack_name }} - external_gateway_info: - network: {{ external_network }} - - interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: subnet } - -# keypair: -# type: OS::Nova::KeyPair -# properties: -# name: -# str_replace: -# template: openshift-ansible-cluster_id-keypair -# params: -# cluster_id: {{ stack_name }} -# public_key: {{ ssh_public_key }} - -{% if openstack_flat_secgrp|bool %} - flat-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-flat-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift cluster - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: 8443 - port_range_max: 8444 - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: tcp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: udp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: tcp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: tcp - port_range_min: 2224 - port_range_max: 2224 - - direction: ingress - protocol: udp - port_range_min: 5404 - port_range_max: 5405 - - direction: ingress - protocol: tcp - port_range_min: 9090 - port_range_max: 9090 - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2380 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: 
ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" - - direction: ingress - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - direction: ingress - protocol: tcp - port_range_min: 443 - port_range_max: 443 -{% else %} - master-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-master-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift cluster master - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: 8443 - port_range_max: 8444 - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: tcp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: udp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: tcp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: tcp - port_range_min: 2224 - port_range_max: 2224 - - direction: ingress - protocol: udp - port_range_min: 5404 - port_range_max: 5405 - - direction: ingress - protocol: tcp - port_range_min: 9090 - port_range_max: 9090 - - etcd-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-etcd-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id etcd cluster - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - remote_mode: remote_group_id - remote_group_id: { get_resource: master-secgrp } - - direction: ingress - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - remote_mode: remote_group_id - - node-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-node-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift cluster nodes - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 
4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" - - infra-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-infra-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift infrastructure cluster nodes - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - direction: ingress - protocol: tcp - port_range_min: 443 - port_range_max: 443 -{% endif %} - - dns-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-dns-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id cluster DNS - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% if num_masters is greaterthan 1 %} - lb-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: openshift-ansible-{{ stack_name }}-lb-secgrp - description: Security group for {{ stack_name }} cluster Load Balancer - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_api_port | default(8443) }} - port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr }} - {% if openshift_master_console_port is defined and openshift_master_console_port is not equalto openshift_master_api_port %} - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_console_port | default(8443) }} - port_range_max: {{ openshift_master_console_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr }} - {% endif %} -{% endif %} - - etcd: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_etcd }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: etcd - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: etcds - cluster_id: {{ stack_name }} - type: etcd - image: {{ openstack_image }} - flavor: {{ etcd_flavor }} - key_name: {{ ssh_public_key }} - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - floating_network: {{ external_network }} - net_name: - str_replace: - template: 
openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - volume_size: {{ etcd_volume_size }} - depends_on: - - interface - -{% if num_masters is greaterthan 1 %} - loadbalancer: - type: OS::Heat::ResourceGroup - properties: - count: 1 - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: lb - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: lb - cluster_id: {{ stack_name }} - type: lb - image: {{ openstack_image }} - flavor: {{ lb_flavor }} - key_name: {{ ssh_public_key }} - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: lb-secgrp } - floating_network: {{ external_network }} - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - volume_size: 5 - depends_on: - - interface -{% endif %} - - masters: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_masters }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: master - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: masters - cluster_id: {{ stack_name }} - type: master - image: {{ openstack_image }} - flavor: {{ master_flavor }} - key_name: {{ ssh_public_key }} - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: -{% if openstack_flat_secgrp|bool %} - - { get_resource: flat-secgrp } -{% else %} - - { get_resource: master-secgrp } - - { get_resource: node-secgrp } -{% if num_etcd is equalto 0 %} - - { get_resource: etcd-secgrp } -{% endif %} -{% endif %} - floating_network: {{ external_network }} - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - volume_size: {{ master_volume_size }} - depends_on: - - interface - - compute_nodes: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_nodes }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: subtype-k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: node - subtype: app - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: nodes - cluster_id: {{ stack_name }} - type: node - subtype: app - node_labels: - region: primary - image: {{ openstack_image }} - flavor: {{ node_flavor }} - key_name: {{ ssh_public_key }} - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - floating_network: {{ external_network }} - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - volume_size: {{ app_volume_size }} - depends_on: - - interface - - infra_nodes: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_infra }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: subtypek8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: node - subtype: infra - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - 
params: - k8s_type: infra - cluster_id: {{ stack_name }} - type: node - subtype: infra - node_labels: - region: infra - image: {{ openstack_image }} - flavor: {{ infra_flavor }} - key_name: {{ ssh_public_key }} - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: -{% if openstack_flat_secgrp|bool %} - - { get_resource: flat-secgrp } -{% else %} - - { get_resource: node-secgrp } - - { get_resource: infra-secgrp } -{% endif %} - floating_network: {{ external_network }} - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - volume_size: {{ infra_volume_size }} - depends_on: - - interface - - dns: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_dns }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: dns - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: dns - cluster_id: {{ stack_name }} - type: dns - image: {{ openstack_image }} - flavor: {{ dns_flavor }} - key_name: {{ ssh_public_key }} - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: -{% if openstack_flat_secgrp|bool %} - - { get_resource: flat-secgrp } -{% else %} - - { get_resource: node-secgrp } -{% endif %} - - { get_resource: dns-secgrp } - floating_network: {{ external_network }} - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - volume_size: {{ dns_volume_size }} - depends_on: - - interface - diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 deleted file mode 100644 index 5851d3b9b..000000000 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ /dev/null @@ -1,170 +0,0 @@ -heat_template_version: 2016-10-14 - -description: OpenShift cluster server - -parameters: - - name: - type: string - label: Name - description: Name - - group: - type: string - label: Host Group - description: The Primary Ansible Host Group - default: host - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - type: - type: string - label: Type - description: Type master or node - - subtype: - type: string - label: Sub-type - description: Sub-type compute or infra for nodes, default otherwise - default: default - - key_name: - type: string - label: Key name - description: Key name of keypair - - image: - type: string - label: Image - description: Name of the image - - flavor: - type: string - label: Flavor - description: Name of the flavor - - net: - type: string - label: Net ID - description: Net resource - - net_name: - type: string - label: Net name - description: Net name - - subnet: - type: string - label: Subnet ID - description: Subnet resource - - secgrp: - type: comma_delimited_list - label: Security groups - description: Security group resources - - floating_network: - type: string - label: Floating network - description: Network to allocate floating IP from - - availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - - volume_size: - type: number - description: Size of the volume to be created. 
- default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - node_labels: - type: json - description: OpenShift Node Labels - default: {"region": "default" } - -outputs: - - name: - description: Name of the server - value: { get_attr: [ server, name ] } - - private_ip: - description: Private IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 0 - - addr - - floating_ip: - description: Floating IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 1 - - addr - -resources: - - server: - type: OS::Nova::Server - properties: - name: { get_param: name } - key_name: { get_param: key_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: - - port: { get_resource: port } - user_data: - get_file: user-data - user_data_format: RAW - metadata: - group: { get_param: group } - environment: { get_param: cluster_env } - clusterid: { get_param: cluster_id } - host-type: { get_param: type } - sub-host-type: { get_param: subtype } - node_labels: { get_param: node_labels } - - port: - type: OS::Neutron::Port - properties: - network: { get_param: net } - fixed_ips: - - subnet: { get_param: subnet } - security_groups: { get_param: secgrp } - - floating-ip: - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: floating_network } - port_id: { get_resource: port } - - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { get_resource: server } - mountpoint: /dev/sdb diff --git a/roles/openstack-stack/templates/user_data.j2 b/roles/openstack-stack/templates/user_data.j2 deleted file mode 100644 index eb65f7cec..000000000 --- a/roles/openstack-stack/templates/user_data.j2 +++ /dev/null @@ -1,13 +0,0 @@ -#cloud-config -disable_root: true - -system_info: - default_user: - name: openshift - sudo: ["ALL=(ALL) NOPASSWD: ALL"] - -write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - content: | - Defaults:openshift !requiretty diff --git a/roles/openstack-stack/test/roles b/roles/openstack-stack/test/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/roles/openstack-stack/test/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml deleted file mode 100644 index 0fbf66f34..000000000 --- a/roles/openstack-stack/test/stack-create-test.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- hosts: localhost - roles: - - role: openstack-stack - stack_name: test-stack - dns_domain: "{{ public_dns_domain }}" - dns_nameservers: "{{ public_dns_nameservers }}" - subnet_prefix: "{{ openstack_subnet_prefix }}" - ssh_public_key: "{{ openstack_ssh_public_key }}" - openstack_image: "{{ openstack_default_image_name }}" - etcd_flavor: "{{ openstack_default_flavor }}" - master_flavor: "{{ openstack_default_flavor }}" - node_flavor: "{{ openstack_default_flavor }}" - infra_flavor: "{{ openstack_default_flavor }}" - dns_flavor: "{{ openstack_default_flavor }}" - external_network: "{{ openstack_external_network_name }}" diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md deleted file mode 100644 index 748de282c..000000000 --- 
a/roles/subscription-manager/README.md +++ /dev/null @@ -1,156 +0,0 @@ -# Red Hat Subscription Manager Ansible Role - -## Parameters - -This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. No values are set by default which disables this role. The variables are: - -### rhsm_satellite - -Subscription Manager server hostname. If using a Satellite server set the FQDN here. If using RHSM Hosted this value must be left blank, none or false. - -Default: none - -### rhsm_username - -Subscription Manager username. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. - -Default: none - -### rhsm_password - -Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. - -NOTE: If this variable is specified on the command-line or set in a variable file it may leave your password exposed. For this reason you may perfer to use an Activation Key if using Satellite. For RHSM Hosted, your password must be specified. There are two ways to provide the password to the Ansible playbook without exposing it to prying eyes. - -1. The first method is to use a **vars_prompt** to collect the password up front one time for the playbook. Ansible will not display the password if the prompt is configured as **private** and the task will not display the password on the CLI. This is the a good method as it supports automating the task to every host with only one password entry. To enable **vars_prompt** add the following to the very top of your playbook after the **hosts** declaration and before any **pre_tasks** section: - - ``` - - hosts: localhost - # Add the following lines after a -hosts: declaration and before pre_tasks: - # Start of vars_prompt code block - vars_prompt: - - name: "rhsm_password" - prompt: "Subscription Manager password" - confirm: yes - private: yes - # End of vars_prompt code block - pre_tasks: - ``` - -2. A second method is to use an encrypted file via **ansible-vault**. This does does not require modifying any code as the previous method, but does require more work to create and encrypt the file. To accomplish this, first create a file containing at least the **rhsm_password** variable (it is also possible to specify additional variables to encrypt them all as well): - 1. Create a file to contain the variable such as **secrets.yml**: - - ``` - --- - rhsm_password: "my_secret_password" - # other variables can optionally be placed here as well - ``` - - 2. Encrypt the file with **ansible-vault**: - - ``` - $ ansible-vault encrypt secrets.yml - Vault password: - Confirm Vault password: - Encryption successful - ``` - - 3. When executing **ansible-playbook** specify **--ask-vault-pass** to be prompted for the decryption password, and also specify the location of the **secrets.yml** as such: - - ``` - $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" - ``` - - NOTE: Optionally the file containing the encrypted variables can be decrypted with **ansible-vault** and the **--ask-vault-pass** option omitted to prevent any password prompting (for automated runs) and the file can be encrypted after the run. This can be used if an external system such as Jenkins would handle the decryption/encryption outside of Ansible. - -Default: none - -### rhsm_org - -Optional Subscription Manager Satellite Organization. 
Required for Satellite, ignored if using RHSM Hosted. - -Default: none - -### rhsm_activationkey - -Optional Subscription Manager Satellite Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. - -Default: none - -### rhsm_pool - -Optional Subscription Manager pool, determine this by running **subscription-manager list --available** on a registered system. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** will ignore this option. - -Default: none - -### rhsm_repos - -Optional list of repositories to enable. If left blank it is expected that the **rhsm_activationkey** will specify repos instead. If populated, a **subscription-manager repos --disable=\*** will be run and each of the specified repos explicitly enabled. Valid for RHSM Hosted or Satellite - -NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such: - -rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server-extras-rpms"]' - -Default: none - -## Calling This Role -Calling this role is done at both **pre_tasks** and **roles** sections of a playbook and optionally a **vars_prompt**. - -### vars_prompt -Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. It also cannot be shown conditionally. For this reason it is not included in this role by default. A better method may be using a file containing the password variable encrypted with **ansible-vault**. See the **rhsm_password** section for more details. - -To Add a prompt to capture **rhsm_password**: - -``` -- hosts: localhost - # Add the following lines after a -hosts: declaration and before pre_tasks: - # Start of vars_prompt code block - vars_prompt: - - name: "rhsm_password" - prompt: "Subscription Manager password" - confirm: yes - private: yes - # End of vars_prompt code block - pre_tasks: -``` - -### pre-tasks - -A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: - -``` -pre_tasks: -- include: roles/subscription-manager/pre_tasks/pre_tasks.yml -``` - -### roles - -The bulk of the work is performed in the main.yml for this role. The pre-task play will set a variable which can be checked to contitionally include this role as such: - -``` -roles: - - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } -``` - -## Running Playbooks with this Role - -- To register to RHSM Hosted or Satellite with a username and plain text password (NOTE: This may retain your password in your CLI history): - - ``` - $ ansible-playbook --extra-vars="rhsm_username=vvaldez rhsm_password=my_secret_password " - ``` - -- To register to RHSM Hosted or Satellite with username and an encrypted file containing the password: - - ``` - $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" - - ``` - -- To register to a Satellite server with an activation key: - - ``` - $ ansible-playbook --extra-vars="rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1 " - - ``` -- To ignore any Subscription Manager activities, simply do not set any parameters. 
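For reference, the calling convention described in the README above can be assembled into a single playbook. The sketch below is only a minimal, illustrative combination of the snippets shown there (vars_prompt, the pre_tasks include, and the conditional role invocation); the `hosts: localhost` pattern and any surrounding inventory details are assumptions for illustration, not part of the role itself:

```
# Minimal sketch: prompt once for the password, run the variable checks,
# then conditionally apply the role. Hosts/inventory are illustrative.
- hosts: localhost
  vars_prompt:
    - name: "rhsm_password"
      prompt: "Subscription Manager password"
      confirm: yes
      private: yes
  pre_tasks:
    - include: roles/subscription-manager/pre_tasks/pre_tasks.yml
  roles:
    - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' }
```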
diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml deleted file mode 100644 index 464670fc0..000000000 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- name: "Set password fact" - set_fact: - rhsm_password: "{{ rhsm_password | default(None) }}" - no_log: true - -- name: "Initialize Subscription Manager fact" - set_fact: - rhsm_register: true - -- name: "Determine if Subscription Manager should be used" - set_fact: - rhsm_register: false - when: - - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' - - rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '' - - rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '' - - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' - - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - -- name: "Validate Subscription Manager organization is set" - fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" - when: - - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' - - rhsm_satellite is defined - - rhsm_satellite is not none - - rhsm_satellite|trim != '' - - rhsm_register - -- name: "Validate Subscription Manager authentication is defined" - fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set. See the README.md for details on securely prompting for a password" - when: - - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') - - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - - rhsm_register - -- name: "Validate activation key and Hosted are not requested together" - fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'" - when: - - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' - - rhsm_activationkey is defined - - rhsm_activationkey is not none - - rhsm_activationkey|trim != '' - - rhsm_register diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml deleted file mode 100644 index 8c1ae697a..000000000 --- a/roles/subscription-manager/tasks/main.yml +++ /dev/null @@ -1,122 +0,0 @@ ---- -- name: "Initialize rhsm_password variable if vars_prompt was used" - set_fact: - rhsm_password: "{{ hostvars.localhost.rhsm_password }}" - when: - - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' - -- name: "Initializing Subscription Manager authentication method" - set_fact: - rhsm_authentication: false - -# 'rhsm_activationkey' will take precedence even if 'rhsm_username' and 'rhsm_password' are also set -- name: "Setting Subscription Manager Activation Key Fact" - set_fact: - rhsm_authentication: "key" - when: - - rhsm_activationkey is defined - - rhsm_activationkey is not none - - rhsm_activationkey|trim != '' - - not rhsm_authentication - -# If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password -- name: "Setting Subscription Manager Username and Password Fact" - set_fact: - rhsm_authentication: "password" - when: - - rhsm_username is defined - - rhsm_username is not none - - 
rhsm_username|trim != '' - - rhsm_password is defined - - rhsm_password is not none - - rhsm_password|trim != '' - - not rhsm_authentication - -- name: "Initializing registration status" - set_fact: - registered: false - -- name: "Checking subscription status (a failure means it is not registered and will be)" - command: "/usr/bin/subscription-manager status" - ignore_errors: yes - changed_when: no - register: check_if_registered - -- name: "Set registration fact if system is already registered" - set_fact: - registered: true - when: check_if_registered.rc == 0 - -- name: "Cleaning any old subscriptions" - command: "/usr/bin/subscription-manager clean" - when: - - not registered - - rhsm_authentication is defined - -- name: "Install Satellite certificate" - command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" - when: - - not registered - - rhsm_satellite is defined - - rhsm_satellite is not none - - rhsm_satellite|trim != '' - -- name: "Register to Satellite using activation key" - command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org='{{ rhsm_org }}'" - when: - - not registered - - rhsm_authentication == 'key' - - rhsm_satellite is defined - - rhsm_satellite is not none - - rhsm_satellite|trim != '' - -# This can apply to either Hosted or Satellite -- name: "Register using username and password" - command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" - no_log: true - when: - - not registered - - rhsm_authentication == "password" - - rhsm_org is not defined or rhsm_org is none or rhsm_org|trim == '' - -# This can apply to either Hosted or Satellite -- name: "Register using username, password and organization" - command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }} --org={{ rhsm_org }}" - no_log: true - when: - - not registered - - rhsm_authentication == "password" - - rhsm_org is defined - - rhsm_org is not none - - rhsm_org|trim != '' - -- name: "Auto-attach to Subscription Manager Pool" - command: "/usr/bin/subscription-manager attach --auto" - when: - - not registered - - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - -- name: "Attach to a specific pool" - command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" - when: - - rhsm_pool is defined - - rhsm_pool is not none - - rhsm_pool|trim != '' - - not registered - -- name: "Disable all repositories" - command: "/usr/bin/subscription-manager repos --disable=*" - when: - - not registered - - rhsm_repos is defined - - rhsm_repos is not none - - rhsm_repos|trim != '' - -- name: "Enable specified repositories" - command: "/usr/bin/subscription-manager repos --enable={{ item }}" - with_items: "{{ rhsm_repos }}" - when: - - not registered - - rhsm_repos is defined - - rhsm_repos is not none - - rhsm_repos|trim != '' -- cgit v1.2.3 -- cgit v1.2.3 From fb6ad9bb44f89ffbf39b18b1d263b7b80bcbd984 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 20 Jun 2017 10:39:42 +0200 Subject: Add profiling and skippy stdout (#470) Tune an example ansible.cfg to include tasks profiling info and improve displaying of skipped tasks. 
Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/sample-inventory/ansible.cfg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg index a701e59ac..1a092ed6b 100644 --- a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg +++ b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg @@ -12,6 +12,8 @@ retry_files_enabled = false fact_caching = jsonfile fact_caching_connection = .ansible/cached_facts fact_caching_timeout = 900 +stdout_callback = skippy +callback_whitelist = profile_tasks [ssh_connection] ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no -- cgit v1.2.3 -- cgit v1.2.3 From 0908b25d45b9a5297ed341f136f8d42e59438553 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 21 Jun 2017 15:22:09 +0200 Subject: Use cached facts, do not become for localhost (#484) Prohibit sudoing for localhost played tasks, like DNS setup. Re-use cached facts to speed up deployment. Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/post-provision-openstack.yml | 8 ++++++++ playbooks/provisioning/openstack/provision-openstack.yml | 4 +++- roles/openstack-stack/test/stack-create-test.yml | 2 ++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index 4e42c1c7f..918f9e065 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -1,6 +1,7 @@ --- # Assign hostnames - hosts: cluster_hosts + gather_facts: False become: true pre_tasks: - include: pre_tasks.yml @@ -9,6 +10,7 @@ # Subscribe DNS Host to allow for configuration below - hosts: dns + gather_facts: False become: true roles: - role: subscription-manager @@ -17,11 +19,14 @@ # Determine which DNS server(s) to use for our generated records - hosts: localhost + gather_facts: False + become: False roles: - dns-server-detect # Build the DNS Server Views and Configure DNS Server(s) - hosts: dns + gather_facts: False become: true pre_tasks: - include: pre_tasks.yml @@ -32,6 +37,8 @@ # Build and process DNS Records - hosts: localhost + gather_facts: False + become: False pre_tasks: - include: pre_tasks.yml - name: "Generate dns records" @@ -41,6 +48,7 @@ # OpenShift Pre-Requisites - hosts: OSEv3 + gather_facts: False become: true tasks: - name: "Edit /etc/resolv.conf on masters/nodes" diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index c7ad782c9..a2cf7b110 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -1,6 +1,7 @@ --- - hosts: localhost gather_facts: True + become: False pre_tasks: - include: pre_tasks.yml roles: @@ -31,12 +32,13 @@ - name: Refresh Server inventory hosts: localhost connection: local + become: False gather_facts: False tasks: - meta: refresh_inventory - hosts: cluster_hosts - gather_facts: false + gather_facts: True tasks: - name: Debug hostvar debug: diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml index 0fbf66f34..d80472193 100644 --- a/roles/openstack-stack/test/stack-create-test.yml +++ b/roles/openstack-stack/test/stack-create-test.yml @@ -1,5 +1,7 @@ --- - hosts: localhost 
+ gather_facts: True + become: False roles: - role: openstack-stack stack_name: test-stack -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 8219f17503e16620b4881faefc78023c696ed2e5 Mon Sep 17 00:00:00 2001 From: Tzu-Mainn Chen Date: Wed, 21 Jun 2017 18:01:48 -0400 Subject: Add node_removal_policies variable to allow for scaling down --- playbooks/provisioning/openstack/README.md | 3 +++ playbooks/provisioning/openstack/provision-openstack.yml | 1 + playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml | 2 ++ roles/openstack-stack/defaults/main.yml | 1 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 2 ++ 5 files changed, 9 insertions(+) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 423d57113..4686dfc08 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -67,6 +67,9 @@ The `openstack_num_masters`, `openstack_num_infra` and `openstack_num_nodes` values specify the number of Master, Infra and App nodes to create. +The `openstack_node_removal_policies` allows you to specify which App nodes to +remove. + The `openstack_flat_secgrp`, controls Neutron security groups creation for Heat stacks. Set it to true, if you experience issues with sec group rules quotas. It trades security for number of rules, by sharing the same set diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index c7ad782c9..b983f6652 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -23,6 +23,7 @@ num_nodes: "{{ openstack_num_nodes }}" num_infra: "{{ openstack_num_infra }}" num_dns: "{{ openstack_num_dns | default(1) }}" + node_removal_policies: "{{ openstack_node_removal_policies | to_yaml }}" master_volume_size: "{{ docker_volume_size }}" app_volume_size: "{{ docker_volume_size }}" infra_volume_size: "{{ docker_volume_size }}" diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 047923253..0e128265c 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -12,6 +12,8 @@ openstack_num_masters: 1 openstack_num_infra: 1 openstack_num_nodes: 2 +openstack_node_removal_policies: [] + docker_volume_size: "15" openstack_subnet_prefix: "192.168.99" diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 2a4ef3a45..4f859585f 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -9,4 +9,5 @@ num_masters: 1 num_nodes: 1 num_dns: 1 num_infra: 1 +node_removal_policies: [] etcd_volume_size: 2 diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c750865a5..3916eec02 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -620,6 +620,8 @@ resources: type: OS::Heat::ResourceGroup properties: count: {{ num_nodes }} + removal_policies: + - resource_list: {{ node_removal_policies }} resource_def: type: server.yaml properties: -- cgit v1.2.3 -- cgit v1.2.3 From 3f10c266aab0881ab294513d4ef93a1528d33c6b Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 21 Jun 2017 13:32:48 +0200 Subject: Fix flat sec 
group and infra/dns sec rules Make flat sec group to only merge node/master/etcd sec rules. Add basic dns/ssh sec group and assign it to all but dns node groups. Assign only dns sec group for dns nodes. Assign only infra (and basic) sec groups for ingra nodes. Add security notes for openstack provider. Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 11 +++ roles/openstack-stack/templates/heat_stack.yaml.j2 | 92 ++++++++-------------- 2 files changed, 44 insertions(+), 59 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 423d57113..df00e5507 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -72,6 +72,17 @@ stacks. Set it to true, if you experience issues with sec group rules quotas. It trades security for number of rules, by sharing the same set of firewall rules for master, node, etcd and infra nodes. +#### Security notes + +Configure required `*_ingress_cidr` variables to restrict public access +to provisioned servers from your laptop (a /32 notation should be used) +or your trusted network. The most important is the `node_ingress_cidr` +that restricts public access to the deployed DNS server and cluster +nodes' ephemeral ports range. + +Note, the command ``curl https://api.ipify.org`` helps fiding an external +IP address of your box (the ansible admin node). + ### Update the DNS names in `inventory/hosts` The different server groups are currently grouped by the domain name, diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c750865a5..cba03e2ca 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -142,18 +142,17 @@ resources: # cluster_id: {{ stack_name }} # public_key: {{ ssh_public_key }} -{% if openstack_flat_secgrp|bool %} - flat-secgrp: + common-secgrp: type: OS::Neutron::SecurityGroup properties: name: str_replace: - template: openshift-ansible-cluster_id-flat-secgrp + template: openshift-ansible-cluster_id-common-secgrp params: cluster_id: {{ stack_name }} description: str_replace: - template: Security group for cluster_id OpenShift cluster + template: Basic ssh/dns security group for cluster_id OpenShift cluster params: cluster_id: {{ stack_name }} rules: @@ -162,14 +161,6 @@ resources: port_range_min: 22 port_range_max: 22 remote_ip_prefix: {{ ssh_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: 8443 - port_range_max: 8444 - direction: ingress protocol: tcp port_range_min: 53 @@ -178,6 +169,30 @@ resources: protocol: udp port_range_min: 53 port_range_max: 53 + +{% if openstack_flat_secgrp|bool %} + flat-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-flat-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: 8443 + port_range_max: 8444 - direction: ingress protocol: tcp port_range_min: 8053 @@ -246,14 +261,6 @@ resources: port_range_min: 30000 port_range_max: 32767 remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" - - 
direction: ingress - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - direction: ingress - protocol: tcp - port_range_min: 443 - port_range_max: 443 {% else %} master-secgrp: type: OS::Neutron::SecurityGroup @@ -269,11 +276,6 @@ resources: params: cluster_id: {{ stack_name }} rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 4001 @@ -282,14 +284,6 @@ resources: protocol: tcp port_range_min: 8443 port_range_max: 8444 - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - direction: ingress protocol: tcp port_range_min: 8053 @@ -333,11 +327,6 @@ resources: params: cluster_id: {{ stack_name }} rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 2379 @@ -364,11 +353,6 @@ resources: params: cluster_id: {{ stack_name }} rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 10250 @@ -399,6 +383,7 @@ resources: port_range_min: 30000 port_range_max: 32767 remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" +{% endif %} infra-secgrp: type: OS::Neutron::SecurityGroup @@ -422,7 +407,6 @@ resources: protocol: tcp port_range_min: 443 port_range_max: 443 -{% endif %} dns-secgrp: type: OS::Neutron::SecurityGroup @@ -470,11 +454,6 @@ resources: name: openshift-ansible-{{ stack_name }}-lb-secgrp description: Security group for {{ stack_name }} cluster Load Balancer rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: {{ openshift_master_api_port | default(8443) }} @@ -518,6 +497,7 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: @@ -558,6 +538,7 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: lb-secgrp } + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: @@ -606,6 +587,7 @@ resources: - { get_resource: etcd-secgrp } {% endif %} {% endif %} + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: @@ -649,6 +631,7 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: @@ -691,12 +674,8 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: -{% if openstack_flat_secgrp|bool %} - - { get_resource: flat-secgrp } -{% else %} - - { get_resource: node-secgrp } - { get_resource: infra-secgrp } -{% endif %} + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: @@ -735,11 +714,6 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: -{% if openstack_flat_secgrp|bool %} - - { get_resource: flat-secgrp } -{% else %} - - { get_resource: node-secgrp } -{% endif %} - { get_resource: 
dns-secgrp } floating_network: {{ external_network }} net_name: -- cgit v1.2.3 From 8538169d922e4be3faa2ce57caccd13f4952d1fd Mon Sep 17 00:00:00 2001 From: Katerina Pilatova Date: Fri, 23 Jun 2017 16:38:56 +0200 Subject: OSEv3.yml: added option to ignore set hardware limits for RAM and DISK --- .../provisioning/openstack/sample-inventory/group_vars/OSEv3.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 32ec43387..f4431f798 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -8,3 +8,7 @@ openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" openshift_node_labels: "{{ openstack.metadata.node_labels }}" osm_default_node_selector: 'region=primary' + +# For POCs or demo environments that are using smaller instances than +# the official recommended values for RAM and DISK, uncomment the line below. +# openshift_disable_check: disk_availability,memory_availability -- cgit v1.2.3 From 3aaa53a83988e36baed03f1ddbe7075806ab24ee Mon Sep 17 00:00:00 2001 From: Katerina Pilatova Date: Fri, 23 Jun 2017 16:54:58 +0200 Subject: OSEv3.yml: trailing space... --- playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index f4431f798..7f99986f6 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -10,5 +10,5 @@ openshift_node_labels: "{{ openstack.metadata.node_labels }}" osm_default_node_selector: 'region=primary' # For POCs or demo environments that are using smaller instances than -# the official recommended values for RAM and DISK, uncomment the line below. +# the official recommended values for RAM and DISK, uncomment the line below. # openshift_disable_check: disk_availability,memory_availability -- cgit v1.2.3 From b1035631251b1d556dbbf794d890390665a864ce Mon Sep 17 00:00:00 2001 From: Katerina Pilatova Date: Fri, 23 Jun 2017 17:11:03 +0200 Subject: removed whitespace in front of commented variable --- playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 7f99986f6..72a03132b 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -11,4 +11,4 @@ osm_default_node_selector: 'region=primary' # For POCs or demo environments that are using smaller instances than # the official recommended values for RAM and DISK, uncomment the line below. 
-# openshift_disable_check: disk_availability,memory_availability +#openshift_disable_check: disk_availability,memory_availability -- cgit v1.2.3 From 5fb0e47578a4c5272eacc99e079e8839b6ae3d55 Mon Sep 17 00:00:00 2001 From: Katerina Pilatova Date: Fri, 23 Jun 2017 17:16:57 +0200 Subject: all.yml: removed whitespaces in front of variables --- .../openstack/sample-inventory/group_vars/all.yml | 48 +++++++++++----------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 047923253..4ed329dd3 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -18,40 +18,40 @@ openstack_subnet_prefix: "192.168.99" # # Red Hat subscription # # Using Red Hat Satellite: -# rhsm_register: True -# rhsm_satellite: 'sat-6.example.com' -# rhsm_org: 'OPENSHIFT_ORG' -# rhsm_activationkey: '' +#rhsm_register: True +#rhsm_satellite: 'sat-6.example.com' +#rhsm_org: 'OPENSHIFT_ORG' +#rhsm_activationkey: '' # # Or using RHN username, password and optionally pool: -# rhsm_register: True -# rhsm_username: '' -# rhsm_password: '' -# rhsm_pool: '' +#rhsm_register: True +#rhsm_username: '' +#rhsm_password: '' +#rhsm_pool: '' -# rhsm_repos: -# - "rhel-7-server-rpms" -# - "rhel-7-server-ose-3.5-rpms" -# - "rhel-7-server-extras-rpms" -# - "rhel-7-fast-datapath-rpms" +#rhsm_repos: +# - "rhel-7-server-rpms" +# - "rhel-7-server-ose-3.5-rpms" +# - "rhel-7-server-extras-rpms" +# - "rhel-7-fast-datapath-rpms" # # Roll-your-own DNS -# openstack_num_dns: 0 -# external_nsupdate_keys: -# public: -# key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg==' -# key_algorithm: 'hmac-md5' -# server: '192.168.1.1' -# private: -# key_secret: 'kVE2bVTgZjrdJipxPhID8BEZmbHD8cExlVPR+zbFpW6la8kL5wpXiwOh8q5AAosXQI5t95UXwq3Inx8QT58duw==' -# key_algorithm: 'hmac-md5' -# server: '192.168.1.2' +#openstack_num_dns: 0 +#external_nsupdate_keys: +# public: +# key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg==' +# key_algorithm: 'hmac-md5' +# server: '192.168.1.1' +# private: +# key_secret: 'kVE2bVTgZjrdJipxPhID8BEZmbHD8cExlVPR+zbFpW6la8kL5wpXiwOh8q5AAosXQI5t95UXwq3Inx8QT58duw==' +# key_algorithm: 'hmac-md5' +# server: '192.168.1.2' # NOTE(shadower): Do not change this value. The Ansible user is currently # hardcoded to `openshift`. 
ansible_user: openshift -# Use a single security group for a cluster +# # Use a single security group for a cluster openstack_flat_secgrp: false -- cgit v1.2.3 From 2fa7c112561eca54e0980902bda6920506c96f92 Mon Sep 17 00:00:00 2001 From: Tzu-Mainn Chen Date: Fri, 23 Jun 2017 15:47:17 -0400 Subject: rename node_removal_policies, add some comments and defaults --- playbooks/provisioning/openstack/README.md | 4 ++-- playbooks/provisioning/openstack/provision-openstack.yml | 2 +- playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml | 3 ++- roles/openstack-stack/defaults/main.yml | 2 +- roles/openstack-stack/templates/heat_stack.yaml.j2 | 2 +- 5 files changed, 7 insertions(+), 6 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 4686dfc08..37868b2ea 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -67,8 +67,8 @@ The `openstack_num_masters`, `openstack_num_infra` and `openstack_num_nodes` values specify the number of Master, Infra and App nodes to create. -The `openstack_node_removal_policies` allows you to specify which App nodes to -remove. +The `openstack_nodes_to_remove` allows you to specify the numerical indexes +of App nodes that should be removed; for example, ['0', '2'], The `openstack_flat_secgrp`, controls Neutron security groups creation for Heat stacks. Set it to true, if you experience issues with sec group rules diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index b983f6652..628044de6 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -23,7 +23,7 @@ num_nodes: "{{ openstack_num_nodes }}" num_infra: "{{ openstack_num_infra }}" num_dns: "{{ openstack_num_dns | default(1) }}" - node_removal_policies: "{{ openstack_node_removal_policies | to_yaml }}" + nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" master_volume_size: "{{ docker_volume_size }}" app_volume_size: "{{ docker_volume_size }}" infra_volume_size: "{{ docker_volume_size }}" diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 0e128265c..ff9aaab63 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -12,7 +12,8 @@ openstack_num_masters: 1 openstack_num_infra: 1 openstack_num_nodes: 2 -openstack_node_removal_policies: [] +# # Numerical index of nodes to remove +# openstack_nodes_to_remove: [] docker_volume_size: "15" diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 4f859585f..4831d6bc4 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -9,5 +9,5 @@ num_masters: 1 num_nodes: 1 num_dns: 1 num_infra: 1 -node_removal_policies: [] +nodes_to_remove: [] etcd_volume_size: 2 diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 3916eec02..32ea5ec1d 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -621,7 +621,7 @@ resources: properties: count: {{ num_nodes }} removal_policies: - - resource_list: {{ node_removal_policies }} + - resource_list: {{ 
nodes_to_remove }} resource_def: type: server.yaml properties: -- cgit v1.2.3 -- cgit v1.2.3 From 8af0a60120689267515d7766c432a414eb55d51c Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 26 Jun 2017 09:46:41 +0200 Subject: Modify sec groups for provisioned openstack servers Drop ingress DNS rules from the common secgrp. Add an ingress ICMP rule, restricted by the ssh ingress cidr, to the common secgrp. This allows to ping servers from the control node (ansible admin node). Add dns servers into the common secgrp as well. Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index cba03e2ca..7fd52e52d 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -152,7 +152,7 @@ resources: cluster_id: {{ stack_name }} description: str_replace: - template: Basic ssh/dns security group for cluster_id OpenShift cluster + template: Basic ssh/icmp security group for cluster_id OpenShift cluster params: cluster_id: {{ stack_name }} rules: @@ -162,13 +162,8 @@ resources: port_range_max: 22 remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 + protocol: icmp + remote_ip_prefix: {{ ssh_ingress_cidr }} {% if openstack_flat_secgrp|bool %} flat-secgrp: @@ -422,11 +417,6 @@ resources: params: cluster_id: {{ stack_name }} rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} - direction: ingress protocol: udp port_range_min: 53 @@ -715,6 +705,7 @@ resources: subnet: { get_resource: subnet } secgrp: - { get_resource: dns-secgrp } + - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: str_replace: -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From d25db744f9f1fba8c08289a15632a0f7efe8111a Mon Sep 17 00:00:00 2001 From: Katerina Pilatova Date: Tue, 27 Jun 2017 16:00:49 +0200 Subject: README.md: list jinja2 as a dependency --- playbooks/provisioning/openstack/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index a542e1493..84997b9cd 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -8,6 +8,7 @@ etc.). The result is an environment ready for openshift-ansible. ## Dependencies * [Ansible 2.3](https://pypi.python.org/pypi/ansible) +* [jinja](http://jinja.pocoo.org/docs/2.9/) * [shade](https://pypi.python.org/pypi/shade) * python-dns -- cgit v1.2.3 From 2e56553856c51344069546a189b22d34dacb8bfa Mon Sep 17 00:00:00 2001 From: Katerina Pilatova Date: Tue, 27 Jun 2017 16:02:34 +0200 Subject: README.md: fixing typo --- playbooks/provisioning/openstack/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 84997b9cd..57b72c7f3 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -8,7 +8,7 @@ etc.). The result is an environment ready for openshift-ansible. 
## Dependencies * [Ansible 2.3](https://pypi.python.org/pypi/ansible) -* [jinja](http://jinja.pocoo.org/docs/2.9/) +* [jinja2](http://jinja.pocoo.org/docs/2.9/) * [shade](https://pypi.python.org/pypi/shade) * python-dns -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 2d6b572891e5f9c5f8950f86ae741b386b3d8289 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 27 Jun 2017 18:38:22 +0200 Subject: Put back node/flat secgrp for infra nodes on openstack Partially undo 2028883e936c8a1a0be031a19d531d0804a32b68 to unblock end-to-end deployments Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index cba03e2ca..2d957cc5b 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -674,6 +674,12 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: +# TODO(bogdando) filter only required node rules into infra-secgrp +{% if openstack_flat_secgrp|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: node-secgrp } +{% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } floating_network: {{ external_network }} -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 46901fdba788a52743823c74bd14a82ea90f4339 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 29 Jun 2017 14:22:53 +0000 Subject: Use wait_for_connection for the Heat nodes The `wait_for_connection` module is more reliable as it uses Ansible's `ping` to verify the nodes are really accessible. Using `wait_for` and checking that port 22 is open runs into the possibility of SSH being up but the public keys or users not being set up yet (as that's done with cloud-init). In addition, we were gathering facts before running the wait_for task which rendered it useless. 
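A minimal sketch of the pattern adopted here (group and play names as used elsewhere in these playbooks):

    - hosts: cluster_hosts
      name: Wait for the nodes to come up
      become: False
      gather_facts: False
      tasks:
        - wait_for_connection:

Because `wait_for_connection` retries Ansible's own connection plugin, it only succeeds once SSH, the `openshift` user and its key have all been set up by cloud-init, whereas probing TCP port 22 can pass before that.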
--- playbooks/provisioning/openstack/provision-openstack.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index feea15d5d..18989f448 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -38,6 +38,13 @@ tasks: - meta: refresh_inventory +- hosts: cluster_hosts + name: Wait for the the nodes to come up + become: False + gather_facts: False + tasks: + - wait_for_connection: + - hosts: cluster_hosts gather_facts: True tasks: @@ -45,8 +52,5 @@ debug: msg: "{{ hostvars[inventory_hostname] }}" verbosity: 2 - - name: waiting for server to come back - local_action: wait_for host={{ hostvars[inventory_hostname]['ansible_ssh_host'] }} port=22 delay=30 timeout=300 - become: false - include: post-provision-openstack.yml -- cgit v1.2.3 From d705cb2586680f3e747ef2917bd4640504629acf Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 29 Jun 2017 16:54:04 +0200 Subject: Fix yaml indentation --- playbooks/provisioning/openstack/provision-openstack.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index 18989f448..5d521432b 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -43,7 +43,7 @@ become: False gather_facts: False tasks: - - wait_for_connection: + - wait_for_connection: - hosts: cluster_hosts gather_facts: True -- cgit v1.2.3 -- cgit v1.2.3 From b28d6d787fbdc6f242aff77830a85693c148faa7 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Thu, 29 Jun 2017 17:59:22 +0200 Subject: Manage packages to install/update for openstack provider Allow required packages and yum update all steps to be optionally disabled. Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 10 ++++++++++ roles/openshift-prep/defaults/main.yml | 11 +++++++++++ roles/openshift-prep/tasks/prerequisites.yml | 13 ++++--------- 3 files changed, 25 insertions(+), 9 deletions(-) create mode 100644 roles/openshift-prep/defaults/main.yml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 57b72c7f3..43e5e4878 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -76,6 +76,10 @@ stacks. Set it to true, if you experience issues with sec group rules quotas. It trades security for number of rules, by sharing the same set of firewall rules for master, node, etcd and infra nodes. +The `required_packages` variable also provides a list of the additional +prerequisite packages to be installed before to deploy an OpenShift cluster. +Those are ignored though, if the `manage_packages: False`. + #### Security notes Configure required `*_ingress_cidr` variables to restrict public access @@ -87,6 +91,12 @@ nodes' ephemeral ports range. Note, the command ``curl https://api.ipify.org`` helps fiding an external IP address of your box (the ansible admin node). +There is also the `manage_packages` variable (defaults to True) you +may want to turn off in order to speed up the provisioning tasks. This may +be the case for development environments. When turned off, the servers will +be provisioned omitting the ``yum update`` command. 
This brings security +implications though, and is not recommended for production deployments. + ### Update the DNS names in `inventory/hosts` The different server groups are currently grouped by the domain name, diff --git a/roles/openshift-prep/defaults/main.yml b/roles/openshift-prep/defaults/main.yml new file mode 100644 index 000000000..fac25dcc1 --- /dev/null +++ b/roles/openshift-prep/defaults/main.yml @@ -0,0 +1,11 @@ +--- +# Defines either to install required packages and update all +manage_packages: true +required_packages: + - wget + - git + - net-tools + - bind-utils + - bridge-utils + - bash-completion + - vim-enhanced diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml index 60507636f..433c1c4e3 100644 --- a/roles/openshift-prep/tasks/prerequisites.yml +++ b/roles/openshift-prep/tasks/prerequisites.yml @@ -6,19 +6,14 @@ yum: name: "{{ item }}" state: latest - with_items: - - wget - - git - - net-tools - - bind-utils - - bridge-utils - - bash-completion - - vim-enhanced + with_items: "{{ required_packages }}" + when: manage_packages|bool - name: "Update all packages (this can take a very long time)" yum: - name: "*" + name: '*' state: latest + when: manage_packages|bool - name: "Verify hostname" shell: hostnamectl status | awk "/Static hostname/"'{ print $3 }' -- cgit v1.2.3 From 1409e0a52d45b7781b3a23f3f7eaa8fe09d26cd6 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 19 Jun 2017 12:24:23 +0200 Subject: Persist DNS configuration for nodes for openstack provider * Firstly, provision a Heat stack with given public resolvers. * After the DNS node configured as an authoritative server, switch the Heat stack's Neutron subnet to that resolver (private_dns_server) the way it to become the first entry pushed into the hosts /etc/resolv.conf. It will be serving the cluster domain requests for OpenShift nodes and workloads. * Drop post-provision /etc/reslov.conf nameserver hacks as not needed anymore. * Fix dns floating IPs output and add the priv IPs output as well. * Update docs, clarify localhost vs servers requirements, add required Network Manager setup step. * Use post-provision task names instead of comments. Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 39 +++++++++++++++----- .../openstack/post-provision-openstack.yml | 42 ++++++++++++---------- .../provisioning/openstack/provision-openstack.yml | 41 ++++++--------------- playbooks/provisioning/openstack/stack_params.yaml | 23 ++++++++++++ roles/openstack-stack/tasks/main.yml | 5 +++ .../tasks/subnet_update_dns_servers.yaml | 8 +++++ roles/openstack-stack/templates/heat_stack.yaml.j2 | 23 +++++------- 7 files changed, 108 insertions(+), 73 deletions(-) create mode 100644 playbooks/provisioning/openstack/stack_params.yaml create mode 100644 roles/openstack-stack/tasks/subnet_update_dns_servers.yaml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 57b72c7f3..972ef705d 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -5,13 +5,19 @@ OpenStack resources (servers, networking, volumes, security groups, etc.). The result is an environment ready for openshift-ansible. 
-## Dependencies +## Dependencies for localhost (ansible control/admin node) * [Ansible 2.3](https://pypi.python.org/pypi/ansible) * [jinja2](http://jinja.pocoo.org/docs/2.9/) * [shade](https://pypi.python.org/pypi/shade) -* python-dns +* python-dns / [dnspython](https://pypi.python.org/pypi/dnspython) +* Become (sudo) is not required. +## Dependencies for OpenStack hosted cluster nodes (servers) + +There are no additional dependencies for the cluster nodes. Required +configuration steps are done by Heat given a specific user data config +that normally should not be changed. ## What does it do @@ -42,12 +48,27 @@ etc.). The result is an environment ready for openshift-ansible. Pay special attention to the values in the first paragraph -- these will depend on your OpenStack environment. -The `env_id` and `openstack_dns_domain` will form the DNS domain all +The `env_id` and `public_dns_domain` will form the cluster's DNS domain all your servers will be under. With the default values, this will be -`openshift.example.com`. - -`openstack_nameservers` is a list of DNS servers accessible from all -the created Nova servers. These will be serve as your DNS forwarders. +`openshift.example.com`. For workloads, the default subdomain is 'apps'. +That sudomain can be set as well by the `openshift_app_domain` variable in +the inventory. + +The `public_dns_nameservers` is a list of DNS servers accessible from all +the created Nova servers. These will be serving as your DNS forwarders for +external FQDNs that do not belong to the cluster's DNS domain and its subdomains. + +The `openshift_use_dnsmasq` controls either dnsmasq is deployed or not. +By default, dnsmasq is deployed and comes as the hosts' /etc/resolv.conf file +first nameserver entry that points to the local host instance of the dnsmasq +daemon that in turn proxies DNS requests to the authoritative DNS server. +When Network Manager is enabled for provisioned cluster nodes, which is +normally the case, you should not change the defaults and always deploy dnsmasq. + +Note that the authoritative DNS server is configured on post provsision +steps, and the Neutron subnet for the Heat stack is updated to point to that +server in the end. So the provisioned servers will start using it natively +as a default nameserver that comes from the NetworkManager and cloud-init. `openstack_ssh_key` is a Nova keypair -- you can see your keypairs with `openstack keypair list`. @@ -136,8 +157,8 @@ Once it succeeds, you can install openshift by running: ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/openshift-node/network_manager.yml ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml -Note, the `network_manager.yml` is only required if you're deploying OpenShift -origin. +Note, the `network_manager.yml` step is mandatory and is required for persisting +the hosts' DNS configs. 
## License diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index 918f9e065..412ccd221 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -1,6 +1,6 @@ --- -# Assign hostnames -- hosts: cluster_hosts +- name: Assign hostnames + hosts: cluster_hosts gather_facts: False become: true pre_tasks: @@ -8,8 +8,8 @@ roles: - role: hostnames -# Subscribe DNS Host to allow for configuration below -- hosts: dns +- name: Subscribe DNS Host to allow for configuration below + hosts: dns gather_facts: False become: true roles: @@ -17,15 +17,15 @@ when: hostvars.localhost.rhsm_register tags: 'subscription-manager' -# Determine which DNS server(s) to use for our generated records -- hosts: localhost +- name: Determine which DNS server(s) to use for our generated records + hosts: localhost gather_facts: False become: False roles: - dns-server-detect -# Build the DNS Server Views and Configure DNS Server(s) -- hosts: dns +- name: Build the DNS Server Views and Configure DNS Server(s) + hosts: dns gather_facts: False become: true pre_tasks: @@ -35,8 +35,8 @@ roles: - role: dns-server -# Build and process DNS Records -- hosts: localhost +- name: Build and process DNS Records + hosts: localhost gather_facts: False become: False pre_tasks: @@ -46,18 +46,22 @@ roles: - role: dns -# OpenShift Pre-Requisites -- hosts: OSEv3 +- name: Switch the stack subnet to the configured private DNS server + hosts: localhost + gather_facts: False + become: False + vars_files: + - stack_params.yaml + tasks: + - include_role: + name: openstack-stack + tasks_from: subnet_update_dns_servers + +- name: OpenShift Pre-Requisites + hosts: OSEv3 gather_facts: False become: true tasks: - - name: "Edit /etc/resolv.conf on masters/nodes" - lineinfile: - state: present - dest: /etc/resolv.conf - regexp: "nameserver {{ hostvars['localhost'].private_dns_server }}" - line: "nameserver {{ hostvars['localhost'].private_dns_server }}" - insertafter: search* - name: "Include DNS configuration to ensure proper name resolution" lineinfile: state: present diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index 5d521432b..0c673af2f 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -2,33 +2,12 @@ - hosts: localhost gather_facts: True become: False + vars_files: + - stack_params.yaml pre_tasks: - - include: pre_tasks.yml + - include: pre_tasks.yml roles: - - role: openstack-stack - stack_name: "{{ env_id }}.{{ public_dns_domain }}" - dns_domain: "{{ public_dns_domain }}" - dns_nameservers: "{{ public_dns_nameservers }}" - subnet_prefix: "{{ openstack_subnet_prefix }}" - ssh_public_key: "{{ openstack_ssh_public_key }}" - openstack_image: "{{ openstack_default_image_name }}" - lb_flavor: "{{ openstack_default_flavor | default('m1.small') }}" - etcd_flavor: "{{ openstack_default_flavor | default('m1.small') }}" - master_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" - node_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" - infra_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" - dns_flavor: "{{ openstack_default_flavor | default('m1.small') }}" - external_network: "{{ openstack_external_network_name }}" - num_etcd: "{{ openstack_num_etcd | default(0) }}" - num_masters: 
"{{ openstack_num_masters }}" - num_nodes: "{{ openstack_num_nodes }}" - num_infra: "{{ openstack_num_infra }}" - num_dns: "{{ openstack_num_dns | default(1) }}" - nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" - master_volume_size: "{{ docker_volume_size }}" - app_volume_size: "{{ docker_volume_size }}" - infra_volume_size: "{{ docker_volume_size }}" - + - role: openstack-stack - name: Refresh Server inventory hosts: localhost @@ -36,21 +15,21 @@ become: False gather_facts: False tasks: - - meta: refresh_inventory + - meta: refresh_inventory - hosts: cluster_hosts name: Wait for the the nodes to come up become: False gather_facts: False tasks: - - wait_for_connection: + - wait_for_connection: - hosts: cluster_hosts gather_facts: True tasks: - - name: Debug hostvar - debug: - msg: "{{ hostvars[inventory_hostname] }}" - verbosity: 2 + - name: Debug hostvar + debug: + msg: "{{ hostvars[inventory_hostname] }}" + verbosity: 2 - include: post-provision-openstack.yml diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml new file mode 100644 index 000000000..9c0b09b45 --- /dev/null +++ b/playbooks/provisioning/openstack/stack_params.yaml @@ -0,0 +1,23 @@ +--- +stack_name: "{{ env_id }}.{{ public_dns_domain }}" +dns_domain: "{{ public_dns_domain }}" +dns_nameservers: "{{ public_dns_nameservers }}" +subnet_prefix: "{{ openstack_subnet_prefix }}" +ssh_public_key: "{{ openstack_ssh_public_key }}" +openstack_image: "{{ openstack_default_image_name }}" +lb_flavor: "{{ openstack_default_flavor | default('m1.small') }}" +etcd_flavor: "{{ openstack_default_flavor | default('m1.small') }}" +master_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" +node_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" +infra_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" +dns_flavor: "{{ openstack_default_flavor | default('m1.small') }}" +external_network: "{{ openstack_external_network_name }}" +num_etcd: "{{ openstack_num_etcd | default(0) }}" +num_masters: "{{ openstack_num_masters }}" +num_nodes: "{{ openstack_num_nodes }}" +num_infra: "{{ openstack_num_infra }}" +num_dns: "{{ openstack_num_dns | default(1) }}" +master_volume_size: "{{ docker_volume_size }}" +app_volume_size: "{{ docker_volume_size }}" +infra_volume_size: "{{ docker_volume_size }}" +nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index 71c7bbe0d..a53e6350b 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -35,6 +35,11 @@ template: "{{ stack_template_path }}" wait: yes +# NOTE(bogdando) OS::Neutron::Subnet doesn't support live updates for +# dns_nameservers, so we can't do that for the "create stack" task. 
+- include: subnet_update_dns_servers.yaml + when: private_dns_server is defined + - name: cleanup temp files file: path: "{{ stack_template_pre.path }}" diff --git a/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml new file mode 100644 index 000000000..be4f07b97 --- /dev/null +++ b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml @@ -0,0 +1,8 @@ +--- +- name: Live update the subnet's DNS servers + os_subnet: + name: openshift-ansible-{{ stack_name }}-subnet + network_name: openshift-ansible-{{ stack_name }}-net + state: present + use_default_subnetpool: yes + dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}" diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 00a46896c..8bf76b57c 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -61,18 +61,13 @@ outputs: - dns - name - dns_floating_ip: - description: Floating IP of the DNS - value: - get_attr: - - dns - - addresses - - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - - 1 - - addr + dns_floating_ips: + description: Floating IPs of the DNS + value: { get_attr: [ dns, floating_ip ] } + + dns_private_ips: + description: Private IPs of the DNS + value: { get_attr: [ dns, private_ip ] } resources: @@ -111,9 +106,9 @@ resources: params: subnet_24_prefix: {{ subnet_prefix }} dns_nameservers: - {% for nameserver in dns_nameservers %} +{% for nameserver in dns_nameservers %} - {{ nameserver }} - {% endfor %} +{% endfor %} router: type: OS::Neutron::Router -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 7d92318da75c0f1599465e02d58496e470725796 Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Mon, 10 Jul 2017 15:45:48 +0200 Subject: Playbook prerequisites.yml checks that prerequisites are met before provisioning (#518) * prerequisites.yml: check prerequisites on localhost needed for provisioning provision.yml: includes prerequisites.yml * prerequisites: indentation fixed * prerequisites.yml: used ansible_version variable, openstack modules for ansible * prerequisites.yml: os_keypair is not suitable for this purpose * prerequisites.yml: openstack keypair command exchanged for shade - there is no Ansible module for this now - os_keypair is not suitable for this purpose - python-openstackclient dependency is not desirable --- playbooks/provisioning/openstack/prerequisites.yml | 76 ++++++++++++++++++++++ playbooks/provisioning/openstack/provision.yaml | 2 + 2 files changed, 78 insertions(+) create mode 100644 playbooks/provisioning/openstack/prerequisites.yml diff --git a/playbooks/provisioning/openstack/prerequisites.yml b/playbooks/provisioning/openstack/prerequisites.yml new file mode 100644 index 000000000..71a99fc82 --- /dev/null +++ b/playbooks/provisioning/openstack/prerequisites.yml @@ -0,0 +1,76 @@ +--- +- hosts: localhost + tasks: + + # Check ansible + - name: Check Ansible version + assert: + that: > + (ansible_version.major == 2 and ansible_version.minor >= 3) or + (ansible_version.major > 2) + msg: "Ansible version must be at least 2.3" + + # Check shade + - name: Try to import python module shade + command: python -c "import shade" + ignore_errors: yes + register: shade_result + - name: Check if shade is installed + assert: + that: 
'shade_result.rc == 0' + msg: "Python module shade is not installed" + + # Check python-dns + - name: Try to import python DNS module + command: python -c "import dns" + ignore_errors: yes + register: pythondns_result + - name: Check if python-dns is installed + assert: + that: 'pythondns_result.rc == 0' + msg: "Python module python-dns is not installed" + + # Check jinja2 + - name: Try to import jinja2 module + command: python -c "import jinja2" + ignore_errors: yes + register: jinja_result + - name: Check if jinja2 is installed + assert: + that: 'jinja_result.rc == 0' + msg: "Python module jinja2 is not installed" + + # Check Glance image + - name: Try to get image facts + os_image_facts: + image: "{{ openstack_default_image_name }}" + register: image_result + - name: Check that image is available + assert: + that: "image_result.ansible_facts.openstack_image" + msg: "Image {{ openstack_default_image_name }} is not available" + + # Check network name + - name: Try to get network facts + os_networks_facts: + name: "{{ openstack_external_network_name }}" + register: network_result + - name: Check that network is available + assert: + that: "network_result.ansible_facts.openstack_networks" + msg: "Network {{ openstack_external_network_name }} is not available" + + # Check keypair + # TODO kpilatov: there is no Ansible module for getting OS keypairs + # (os_keypair is not suitable for this) + # this method does not force python-openstackclient dependency + - name: Try to show keypair + command: > + python -c 'import shade; cloud = shade.openstack_cloud(); + exit(cloud.get_keypair("{{ openstack_ssh_public_key }}") is None)' + ignore_errors: yes + register: key_result + - name: Check that keypair is available + assert: + that: 'key_result.rc == 0' + msg: "Keypair {{ openstack_ssh_public_key }} is not available" diff --git a/playbooks/provisioning/openstack/provision.yaml b/playbooks/provisioning/openstack/provision.yaml index 7cde5e8b8..92b6d3356 100644 --- a/playbooks/provisioning/openstack/provision.yaml +++ b/playbooks/provisioning/openstack/provision.yaml @@ -1,4 +1,6 @@ --- +- include: "prerequisites.yml" + - include: "provision-openstack.yml" - include: "pre-install.yml" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 25a2d4f772d735bc31e7a891e16e3d7d7002cd68 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 12 Jul 2017 11:52:11 +0200 Subject: Install DNS roles from casl-infra with galaxy (#529) Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 19 ++++++++++++++++++- .../provisioning/openstack/galaxy-requirements.yaml | 6 ++++++ .../openstack/post-provision-openstack.yml | 4 ++-- 3 files changed, 26 insertions(+), 3 deletions(-) create mode 100644 playbooks/provisioning/openstack/galaxy-requirements.yaml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 34b548b9b..05e7e791a 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -4,10 +4,10 @@ This repository contains playbooks and Heat templates to provision OpenStack resources (servers, networking, volumes, security groups, etc.). The result is an environment ready for openshift-ansible. 
- ## Dependencies for localhost (ansible control/admin node) * [Ansible 2.3](https://pypi.python.org/pypi/ansible) +* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps) * [jinja2](http://jinja.pocoo.org/docs/2.9/) * [shade](https://pypi.python.org/pypi/shade) * python-dns / [dnspython](https://pypi.python.org/pypi/dnspython) @@ -19,6 +19,23 @@ There are no additional dependencies for the cluster nodes. Required configuration steps are done by Heat given a specific user data config that normally should not be changed. +## Required galaxy modules + +In order to pull in external dependencies for DNS configuration steps, +the following commads need to be executed: + + ansible-galaxy install \ + -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \ + -p openshift-ansible-contrib/roles + +Alternatively you can install directly from github: + + ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \ + -p openshift-ansible-contrib/roles + +Note, this assumes we're in the directory that contains the clonned +openshift-ansible-contrib repo in its root path. + ## What does it do * Create Nova servers with floating IP addresses attached diff --git a/playbooks/provisioning/openstack/galaxy-requirements.yaml b/playbooks/provisioning/openstack/galaxy-requirements.yaml new file mode 100644 index 000000000..93dd14ec2 --- /dev/null +++ b/playbooks/provisioning/openstack/galaxy-requirements.yaml @@ -0,0 +1,6 @@ +--- +# This is the Ansible Galaxy requirements file to pull in the correct roles + +# From 'infra-ansible' +- src: https://github.com/redhat-cop/infra-ansible + version: master diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index 412ccd221..8d4ba3c12 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -33,7 +33,7 @@ - name: "Generate dns-server views" include: openstack_dns_views.yml roles: - - role: dns-server + - role: infra-ansible/roles/dns-server - name: Build and process DNS Records hosts: localhost @@ -44,7 +44,7 @@ - name: "Generate dns records" include: openstack_dns_records.yml roles: - - role: dns + - role: infra-ansible/roles/dns - name: Switch the stack subnet to the configured private DNS server hosts: localhost -- cgit v1.2.3 From a3a61ab4544d97dbc76dcd278c0f17d7a17fa022 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 12 Jul 2017 17:30:00 +0200 Subject: Add defaults values for some openstack vars (#539) * Add defaults values for some openstack vars Ansible shows errors when the `rhsm_register` and `openstack_flat_secgrp` values are not present in the inventory even though they have sensible default values. This makes them both default to false when they're not specified. * Comment out the flat security group option in inv It's no longer required to be there so let's comment it out. 
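For illustration, a sketch of the defaulting pattern the change relies on (both forms appear in the hunks below):

    # Ansible: a missing rhsm_register is treated as False
    - role: subscription-manager
      when: hostvars.localhost.rhsm_register|default(False)
      tags: 'subscription-manager'

    # Jinja2: a missing openstack_flat_secgrp is treated as False
    {% if openstack_flat_secgrp|default(False)|bool %}
    ...
    {% endif %}

With `default(False)`, an inventory that never defines these keys no longer aborts with an undefined-variable error.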
--- playbooks/provisioning/openstack/post-provision-openstack.yml | 2 +- .../openstack/sample-inventory/group_vars/all.yml | 4 ++-- roles/openstack-stack/templates/heat_stack.yaml.j2 | 11 +++++------ 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index 8d4ba3c12..460c6596b 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -14,7 +14,7 @@ become: true roles: - role: subscription-manager - when: hostvars.localhost.rhsm_register + when: hostvars.localhost.rhsm_register|default(False) tags: 'subscription-manager' - name: Determine which DNS server(s) to use for our generated records diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 7c9033828..c7e54f6cb 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -56,5 +56,5 @@ openstack_subnet_prefix: "192.168.99" # hardcoded to `openshift`. ansible_user: openshift -# # Use a single security group for a cluster -openstack_flat_secgrp: false +# # Use a single security group for a cluster (default: false) +#openstack_flat_secgrp: false diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 8bf76b57c..566b57ef8 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -160,7 +160,7 @@ resources: protocol: icmp remote_ip_prefix: {{ ssh_ingress_cidr }} -{% if openstack_flat_secgrp|bool %} +{% if openstack_flat_secgrp|default(False)|bool %} flat-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -481,7 +481,7 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: - - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: @@ -563,7 +563,7 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: -{% if openstack_flat_secgrp|bool %} +{% if openstack_flat_secgrp|default(False)|bool %} - { get_resource: flat-secgrp } {% else %} - { get_resource: master-secgrp } @@ -617,7 +617,7 @@ resources: net: { get_resource: net } subnet: { get_resource: subnet } secgrp: - - { get_resource: {% if openstack_flat_secgrp|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - { get_resource: common-secgrp } floating_network: {{ external_network }} net_name: @@ -662,7 +662,7 @@ resources: subnet: { get_resource: subnet } secgrp: # TODO(bogdando) filter only required node rules into infra-secgrp -{% if openstack_flat_secgrp|bool %} +{% if openstack_flat_secgrp|default(False)|bool %} - { get_resource: flat-secgrp } {% else %} - { get_resource: node-secgrp } @@ -718,4 +718,3 @@ resources: volume_size: {{ dns_volume_size }} depends_on: - interface - -- cgit v1.2.3 From 9aa8a79f3e8530a89ee32f1b4980b5669317cfba Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 12 Jul 2017 17:30:16 +0200 Subject: 
Switch the sample inventory to CentOS (#541) * Switch the sample inventory to CentOS This changes the image name and deployment types to use centos instead of rhel and sets `rhsm_register` to false. With these changes, the inventory should be immediately deployable using the default values (assuming the image, network and flavor names match). Ideally, the upstream CI will just end up using this inventory with little to no changes, too at some point. * Specify the origin openshift_release --- .../openstack/sample-inventory/group_vars/OSEv3.yml | 11 +++++++++-- .../openstack/sample-inventory/group_vars/all.yml | 4 +++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 72a03132b..70e4d8cb1 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -1,6 +1,8 @@ --- -openshift_deployment_type: openshift-enterprise -openshift_release: v3.5 +openshift_deployment_type: origin +openshift_release: 1.5.1 +#openshift_deployment_type: openshift-enterprise +#openshift_release: v3.5 openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" # NOTE(shadower): do not remove this line, otherwise the default node labels @@ -9,6 +11,11 @@ openshift_node_labels: "{{ openstack.metadata.node_labels }}" osm_default_node_selector: 'region=primary' +# NOTE(shadower): the hostname check seems to always fail because the +# host's floating IP address doesn't match the address received from +# inside the host. +openshift_override_hostname_check: true + # For POCs or demo environments that are using smaller instances than # the official recommended values for RAM and DISK, uncomment the line below. #openshift_disable_check: disk_availability,memory_availability diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index c7e54f6cb..f1cdff86a 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -4,7 +4,7 @@ public_dns_domain: "example.com" public_dns_nameservers: [] openstack_ssh_public_key: "openshift" -openstack_default_image_name: "rhel73" +openstack_default_image_name: "centos7" openstack_default_flavor: "m1.medium" openstack_external_network_name: "public" @@ -20,6 +20,8 @@ docker_volume_size: "15" openstack_subnet_prefix: "192.168.99" # # Red Hat subscription +rhsm_register: False + # # Using Red Hat Satellite: #rhsm_register: True #rhsm_satellite: 'sat-6.example.com' -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From bb483b5877a18422d382f2348ad53d4de07a5fd7 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 13 Jul 2017 13:13:40 +0000 Subject: Replace greaterthan and equalto in openstack-stack These two Jinja filters were added in 2.8 which is notably not packaged in CentOS and RHEL. This removes them in favour of the `==` and `>` operators which are available in Jinja 2.7. 
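A short sketch of the substitution, using the two sites touched below:

    {# Requires Jinja 2.8+ (not shipped with CentOS/RHEL): #}
    {% if num_masters is greaterthan 1 %} ... {% endif %}
    {% if num_etcd is equalto 0 %} ... {% endif %}

    {# Equivalent on Jinja 2.7: #}
    {% if num_masters > 1 %} ... {% endif %}
    {% if num_etcd == 0 %} ... {% endif %}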
--- roles/openstack-stack/templates/heat_stack.yaml.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 566b57ef8..992f6257b 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -432,7 +432,7 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% if num_masters is greaterthan 1 %} +{% if num_masters > 1 %} lb-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -444,7 +444,7 @@ resources: port_range_min: {{ openshift_master_api_port | default(8443) }} port_range_max: {{ openshift_master_api_port | default(8443) }} remote_ip_prefix: {{ lb_ingress_cidr }} - {% if openshift_master_console_port is defined and openshift_master_console_port is not equalto openshift_master_api_port %} + {% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} - direction: ingress protocol: tcp port_range_min: {{ openshift_master_console_port | default(8443) }} @@ -493,7 +493,7 @@ resources: depends_on: - interface -{% if num_masters is greaterthan 1 %} +{% if num_masters > 1 %} loadbalancer: type: OS::Heat::ResourceGroup properties: @@ -568,7 +568,7 @@ resources: {% else %} - { get_resource: master-secgrp } - { get_resource: node-secgrp } -{% if num_etcd is equalto 0 %} +{% if num_etcd == 0 %} - { get_resource: etcd-secgrp } {% endif %} {% endif %} -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From fb3d95ff05257906d846562b752fb9258794dc38 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 14 Jul 2017 12:22:51 +0200 Subject: Set up NetworkManager automatically (#542) * Set up NetworkManager automatically This removes the extra step of running the `openshift-ansible/playbooks/byo/openshift-node/network_manager.yml` before installing openshift. In addition, the playbook relies on a host group that the provisioning doesn't provide (oo_all_hosts). Instead, we set up NetworkManager on CentOS nodes automatically. And we restart it on RHEL (which is necessary for the nodes to pick up the new DNS we configured the subnet with). This makes the provisioning easier and more resilient. * Apply the node-network-manager role to every node It makes the code simpler and more consistent across distros. --- playbooks/provisioning/openstack/README.md | 3 --- .../openstack/post-provision-openstack.yml | 6 ++++-- roles/node-network-manager/tasks/main.yml | 22 ++++++++++++++++++++++ 3 files changed, 26 insertions(+), 5 deletions(-) create mode 100644 roles/node-network-manager/tasks/main.yml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 05e7e791a..5c2f61202 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -181,11 +181,8 @@ file, this is how you stat the provisioning process: Once it succeeds, you can install openshift by running: - ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/openshift-node/network_manager.yml ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml -Note, the `network_manager.yml` step is mandatory and is required for persisting -the hosts' DNS configs. 
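A minimal sketch of how the new role is wired in (it mirrors the post-provision play changed below):

    - name: OpenShift Pre-Requisites
      hosts: OSEv3
      gather_facts: true
      become: true
      roles:
        - node-network-manager

The role installs NetworkManager, sets USE_PEERDNS and NM_CONTROLLED on the default interface, and restarts the service, so every node (CentOS or RHEL) picks up the DNS server configured on the Neutron subnet without a separate playbook run.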
## License diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index 460c6596b..53db5061c 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -59,12 +59,14 @@ - name: OpenShift Pre-Requisites hosts: OSEv3 - gather_facts: False + gather_facts: true become: true - tasks: + pre_tasks: - name: "Include DNS configuration to ensure proper name resolution" lineinfile: state: present dest: /etc/sysconfig/network regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" + roles: + - node-network-manager diff --git a/roles/node-network-manager/tasks/main.yml b/roles/node-network-manager/tasks/main.yml new file mode 100644 index 000000000..6a17855e7 --- /dev/null +++ b/roles/node-network-manager/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: install NetworkManager + package: + name: NetworkManager + state: present + +- name: configure NetworkManager + lineinfile: + dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" + regexp: '^{{ item }}=' + line: '{{ item }}=yes' + state: present + create: yes + with_items: + - 'USE_PEERDNS' + - 'NM_CONTROLLED' + +- name: enable and start NetworkManager + service: + name: NetworkManager + state: restarted + enabled: yes -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From bef7807177915fe4861fcef5c4a78884f49b3b0e Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Mon, 17 Jul 2017 12:47:03 +0200 Subject: Retry tasks in the subscription manager role (#552) * subscription manager: added 10 retries after 1 second delay * subscription manager: added untils * sub manager: typo --- roles/subscription-manager/tasks/main.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml index 8c1ae697a..e4c9fdffb 100644 --- a/roles/subscription-manager/tasks/main.yml +++ b/roles/subscription-manager/tasks/main.yml @@ -52,6 +52,10 @@ when: - not registered - rhsm_authentication is defined + register: cleaningsubs_result + until: cleaningsubs_result.rc == 0 + retries: 10 + delay: 1 - name: "Install Satellite certificate" command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" @@ -69,6 +73,10 @@ - rhsm_satellite is defined - rhsm_satellite is not none - rhsm_satellite|trim != '' + register: register_key_result + until: register_key_result.rc == 0 + retries: 10 + delay: 1 # This can apply to either Hosted or Satellite - name: "Register using username and password" @@ -78,6 +86,10 @@ - not registered - rhsm_authentication == "password" - rhsm_org is not defined or rhsm_org is none or rhsm_org|trim == '' + register: register_userpw_result + until: register_userpw_result.rc == 0 + retries: 10 + delay: 1 # This can apply to either Hosted or Satellite - name: "Register using username, password and organization" @@ -89,12 +101,20 @@ - rhsm_org is defined - rhsm_org is not none - rhsm_org|trim != '' + register: register_userpworg_result + until: register_userpworg_result.rc == 0 + retries: 10 + delay: 1 - name: "Auto-attach to Subscription Manager Pool" command: "/usr/bin/subscription-manager attach --auto" when: - not registered - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' + register: autoattach_result + until: autoattach_result.rc 
== 0 + retries: 10 + delay: 1 - name: "Attach to a specific pool" command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" @@ -103,6 +123,10 @@ - rhsm_pool is not none - rhsm_pool|trim != '' - not registered + register: attachpool_result + until: attachpool_result.rc == 0 + retries: 10 + delay: 1 - name: "Disable all repositories" command: "/usr/bin/subscription-manager repos --disable=*" @@ -120,3 +144,7 @@ - rhsm_repos is defined - rhsm_repos is not none - rhsm_repos|trim != '' + register: enablerepos_result + until: enablerepos_result.rc == 0 + retries: 10 + delay: 1 -- cgit v1.2.3 From a0d2dd9d29e8622e739870baf172f2b8a7e9c6a0 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 17 Jul 2017 14:05:42 +0200 Subject: Add a role to generate a static inventory (#540) * Add the static-inventory role that configures the inventory/hosts file by the given path, or creates it for you. Signed-off-by: Bogdan Dobrelya --- roles/static_inventory/defaults/main.yml | 8 +++ roles/static_inventory/tasks/checkpoint.yml | 17 ++++++ roles/static_inventory/tasks/main.yml | 6 +++ roles/static_inventory/tasks/openstack.yml | 47 +++++++++++++++++ roles/static_inventory/templates/inventory.j2 | 76 +++++++++++++++++++++++++++ 5 files changed, 154 insertions(+) create mode 100644 roles/static_inventory/defaults/main.yml create mode 100644 roles/static_inventory/tasks/checkpoint.yml create mode 100644 roles/static_inventory/tasks/main.yml create mode 100644 roles/static_inventory/tasks/openstack.yml create mode 100644 roles/static_inventory/templates/inventory.j2 diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml new file mode 100644 index 000000000..315965cde --- /dev/null +++ b/roles/static_inventory/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# Either to checkpoint the dynamic inventory into a static one +refresh_inventory: True +inventory: static +inventory_path: ~/openstack-inventory + +# SSH key to access nodes +private_ssh_key: ~/.ssh/openshift diff --git a/roles/static_inventory/tasks/checkpoint.yml b/roles/static_inventory/tasks/checkpoint.yml new file mode 100644 index 000000000..c0365bd3d --- /dev/null +++ b/roles/static_inventory/tasks/checkpoint.yml @@ -0,0 +1,17 @@ +--- +- name: check for static inventory dir + stat: + path: "{{ inventory_path }}" + register: stat_inventory_path + +- name: create static inventory dir + file: + path: "{{ inventory_path }}" + state: directory + mode: 0750 + when: not stat_inventory_path.stat.exists + +- name: create inventory from template + template: + src: inventory.j2 + dest: "{{ inventory_path }}/hosts" diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml new file mode 100644 index 000000000..15c81690e --- /dev/null +++ b/roles/static_inventory/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- name: Generate in-memory inventory + include: openstack.yml + +- name: Checkpoint in-memory data into a static inventory + include: checkpoint.yml diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml new file mode 100644 index 000000000..a25502835 --- /dev/null +++ b/roles/static_inventory/tasks/openstack.yml @@ -0,0 +1,47 @@ +--- +- no_log: true + block: + - name: fetch all nodes from openstack shade dynamic inventory + command: shade-inventory --list + register: registered_nodes_output + when: refresh_inventory|bool + + - name: set fact for openstack inventory cluster nodes + set_fact: + registered_nodes: "{{ (registered_nodes_output.stdout 
| from_json) | json_query(q) }}" + vars: + q: "[] | [?metadata.clusterid=='{{stack_name}}']" + when: + - refresh_inventory|bool + + - name: set_fact for openstack inventory nodes + set_fact: + registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q2) }}" + vars: + q: "[] | [?metadata.group=='infra.{{stack_name}}']" + q2: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4!='']" + when: + - refresh_inventory|bool + + - name: Add cluster nodes w/o floating IPs to inventory + with_items: "{{ registered_nodes }}" + when: not item in registered_nodes_floating + add_host: + name: '{{ item.name }}' + groups: '{{ item.metadata.group }}' + ansible_host: '{{ item.private_v4 }}' + ansible_fqdn: '{{ item.name }}' + ansible_private_key_file: '{{ private_ssh_key }}' + private_v4: '{{ item.private_v4 }}' + + - name: Add cluster nodes with floating IPs to inventory + with_items: "{{ registered_nodes_floating }}" + when: item in registered_nodes_floating + add_host: + name: '{{ item.name }}' + groups: '{{ item.metadata.group }}' + ansible_host: '{{ item.public_v4 }}' + ansible_fqdn: '{{ item.name }}' + ansible_private_key_file: '{{ private_ssh_key }}' + private_v4: '{{ item.private_v4 }}' + public_v4: '{{ item.public_v4 }}' diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 new file mode 100644 index 000000000..aa87e2b11 --- /dev/null +++ b/roles/static_inventory/templates/inventory.j2 @@ -0,0 +1,76 @@ +# BEGIN Autogenerated hosts +{% for host in groups['all'] %} +{% if hostvars[host].get('ansible_connection', '') == 'local' %} +{{ host }} ansible_connection=local +{% else %} + +{{ host }}{% if 'ansible_host' in hostvars[host] +%} ansible_host={{ hostvars[host]['ansible_host'] }}{% endif %} +{% if 'private_v4' in hostvars[host] +%} private_v4={{ hostvars[host]['private_v4'] }}{% endif %} +{% if 'public_v4' in hostvars[host] +%} public_v4={{ hostvars[host]['public_v4'] }}{% endif %} +{% if 'ansible_private_key_file' in hostvars[host] +%} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %} + +{% endif %} +{% endfor %} +# END autogenerated hosts + +#[all:vars] +# For all group_vars, see ./group_vars/all.yml + +# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups. +# The lb group lets Ansible configure HAProxy as the load balancing solution. +# Comment lb out if your load balancer is pre-configured. 
+[cluster_hosts:children] +OSEv3 +dns + +[OSEv3:children] +masters +nodes +etcd + +# Set variables common for all OSEv3 hosts +#[OSEv3:vars] + +# For OSEv3 normal group vars, see ./group_vars/OSEv3.yml + +# Host Groups + +[masters:children] +masters.{{ stack_name }} + +[etcd:children] +etcd.{{ stack_name }} + +[nodes:children] +masters +infra.{{ stack_name }} +nodes.{{ stack_name }} + +[infra_hosts:children] +infra.{{ stack_name }} + +[dns:children] +dns.{{ stack_name }} + +# Empty placeholders for all groups of the cluster nodes +[masters.{{ stack_name }}] +[etcd.{{ stack_name }}] +[infra.{{ stack_name }}] +[nodes.{{ stack_name }}] +[dns.{{ stack_name }}] + +# BEGIN Autogenerated groups +{% for group in groups %} +{% if group not in ['ungrouped', 'all'] %} +[{{ group }}] +{% for host in groups[group] %} +{{ host }} +{% endfor %} + +{% endif %} +{% endfor %} +# END Autogenerated groups -- cgit v1.2.3 -- cgit v1.2.3 From 86b132e4bb1e5c58c1b194403f7d61fa34b20171 Mon Sep 17 00:00:00 2001 From: Katerina Pilatova Date: Tue, 18 Jul 2017 14:00:17 +0200 Subject: README: added prerequisity for a repository needed for python-openstackclient installation --- playbooks/provisioning/openstack/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 5c2f61202..0d8433367 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -12,6 +12,7 @@ etc.). The result is an environment ready for openshift-ansible. * [shade](https://pypi.python.org/pypi/shade) * python-dns / [dnspython](https://pypi.python.org/pypi/dnspython) * Become (sudo) is not required. +* `rhel-7-server-openstack-10-rpms` repository (in order to be able to install `python-openstackclient`) ## Dependencies for OpenStack hosted cluster nodes (servers) -- cgit v1.2.3 From 7040d1c9562d275bd1cef3059646db696a5f954e Mon Sep 17 00:00:00 2001 From: Katerina Pilatova Date: Tue, 18 Jul 2017 16:38:31 +0200 Subject: dependencies: python-heatclient and python-openstackclient added to optional dependencies --- playbooks/provisioning/openstack/README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 0d8433367..e5ec68458 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -14,6 +14,12 @@ etc.). The result is an environment ready for openshift-ansible. * Become (sudo) is not required. * `rhel-7-server-openstack-10-rpms` repository (in order to be able to install `python-openstackclient`) +### Optional Dependencies forlocalhost +**Note**: When using rhel images, `rhel-7-server-openstack-10-rpms` repository is required in order to install these packages. + +* `python-openstackclient` +* `python-heatclient` + ## Dependencies for OpenStack hosted cluster nodes (servers) There are no additional dependencies for the cluster nodes. Required -- cgit v1.2.3 From 5a94e47f12a85daf1f93e1ea695689808c9a481d Mon Sep 17 00:00:00 2001 From: Katerina Pilatova Date: Tue, 18 Jul 2017 16:40:33 +0200 Subject: README: typo --- playbooks/provisioning/openstack/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index e5ec68458..fe68abb19 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -14,7 +14,7 @@ etc.). 
The result is an environment ready for openshift-ansible. * Become (sudo) is not required. * `rhel-7-server-openstack-10-rpms` repository (in order to be able to install `python-openstackclient`) -### Optional Dependencies forlocalhost +### Optional Dependencies for localhost **Note**: When using rhel images, `rhel-7-server-openstack-10-rpms` repository is required in order to install these packages. * `python-openstackclient` -- cgit v1.2.3 -- cgit v1.2.3 From 7081dd61c6d591ebff565795c460066f7de3809c Mon Sep 17 00:00:00 2001 From: Katerina Pilatova Date: Wed, 19 Jul 2017 11:49:56 +0200 Subject: README: fix --- playbooks/provisioning/openstack/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index fe68abb19..6dd60cd88 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -12,7 +12,6 @@ etc.). The result is an environment ready for openshift-ansible. * [shade](https://pypi.python.org/pypi/shade) * python-dns / [dnspython](https://pypi.python.org/pypi/dnspython) * Become (sudo) is not required. -* `rhel-7-server-openstack-10-rpms` repository (in order to be able to install `python-openstackclient`) ### Optional Dependencies for localhost **Note**: When using rhel images, `rhel-7-server-openstack-10-rpms` repository is required in order to install these packages. -- cgit v1.2.3 From 63e623561f8fbc54e87248edf789b6c5d395cf26 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 19 Jul 2017 15:45:04 +0200 Subject: Set ansible_become for the OSEv3 group Because openshift-ansible requires root on the cluster nodes, but it doesn't explicitly set it in the playbooks (like we do), let's set it in our inventory instead of requiring to pass `--become` to `ansible-playbook`. That will simplify the installation steps as well as let us include the provisioning and openshift-ansible playbooks in a single playbook. --- playbooks/provisioning/openstack/README.md | 2 +- .../provisioning/openstack/sample-inventory/group_vars/OSEv3.yml | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 5c2f61202..ac648a559 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -181,7 +181,7 @@ file, this is how you stat the provisioning process: Once it succeeds, you can install openshift by running: - ansible-playbook --become --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml + ansible-playbook --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml ## License diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 70e4d8cb1..4ce96a031 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -19,3 +19,7 @@ openshift_override_hostname_check: true # For POCs or demo environments that are using smaller instances than # the official recommended values for RAM and DISK, uncomment the line below. #openshift_disable_check: disk_availability,memory_availability + +# NOTE(shadower): Always switch to root on the OSEv3 nodes. +# openshift-ansible requires an explicit `become`. 
+ansible_become: true -- cgit v1.2.3 From 244d4f2347526e6e7428e81f882793aaca75a770 Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Wed, 19 Jul 2017 17:40:53 +0200 Subject: During provisioning, make unnecessary packages optional under a switch (#561) * openshift-prep: bash-completion and vim-enhanced packages are now optional under install_debug_packages switch * openshift-prep: new line removal --- roles/openshift-prep/defaults/main.yml | 2 ++ roles/openshift-prep/tasks/prerequisites.yml | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/roles/openshift-prep/defaults/main.yml b/roles/openshift-prep/defaults/main.yml index fac25dcc1..c8c9a00c0 100644 --- a/roles/openshift-prep/defaults/main.yml +++ b/roles/openshift-prep/defaults/main.yml @@ -1,11 +1,13 @@ --- # Defines either to install required packages and update all manage_packages: true +install_debug_packages: false required_packages: - wget - git - net-tools - bind-utils - bridge-utils +debug_packages: - bash-completion - vim-enhanced diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml index 433c1c4e3..b7601aa48 100644 --- a/roles/openshift-prep/tasks/prerequisites.yml +++ b/roles/openshift-prep/tasks/prerequisites.yml @@ -9,6 +9,13 @@ with_items: "{{ required_packages }}" when: manage_packages|bool +- name: "Install debug packages (optional)" + yum: + name: "{{ item }}" + state: latest + with_items: "{{ debug_packages }}" + when: install_debug_packages|bool + - name: "Update all packages (this can take a very long time)" yum: name: '*' -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 018d410c4d441a8f66e8ac71d82f4ce46b508364 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Thu, 20 Jul 2017 14:52:11 +0200 Subject: Include masters into etcd group, when it is empty (#559) Signed-off-by: Bogdan Dobrelya --- roles/static_inventory/templates/inventory.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index aa87e2b11..ba806f286 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -44,6 +44,7 @@ masters.{{ stack_name }} [etcd:children] etcd.{{ stack_name }} +{% if 'etcd' not in groups or groups['etcd']|length == 0 %}masters.{{ stack_name }}{% endif %} [nodes:children] masters -- cgit v1.2.3 From 1975fb57b4ddee77eec6f849f2c7677e2ee3d6df Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Thu, 20 Jul 2017 14:53:01 +0200 Subject: Generate static inventory with shade inventory (#538) * Autogenerate inventory/hosts when 'inventory: static' (Default), with the shade-inventory tool. * Drop unused anymore: openstack.py and associated GPL notes, an example static inventory, omit manual updates for the inventory DNS names in the deployment guide. * Switch openstack.py formatted inventory hostvars to the shade-inventory format (omit openstack.* from hostvars). * Populate node labels from inventory vars instead of the heat templates combined with inventory vars. * Add app (k8s minions) nodes group for primary node labels. 
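
[Editor's note] For readers unfamiliar with the shade-inventory format referenced in the commit above, the sketch below shows the filtering idea the static_inventory role applies: call `shade-inventory --list`, parse the JSON, and keep only the servers whose `metadata.clusterid` matches the cluster. This is a standalone, hypothetical example and is not part of any patch here; the `mycluster` value and the playbook itself are made up for illustration, and it assumes the `jmespath` Python library is available for Ansible's `json_query` filter.

```yaml
---
# Minimal, hypothetical illustration only -- not part of the patch series.
# 'mycluster' stands in for the role's {{ stack_name }} value.
# Requires the shade-inventory tool and the jmespath Python library
# (used by Ansible's json_query filter) on the control host.
- hosts: localhost
  gather_facts: false
  tasks:
    - name: List all servers known to the OpenStack cloud
      command: shade-inventory --list
      register: registered_nodes_output
      changed_when: false

    - name: Keep only the servers whose metadata.clusterid matches this cluster
      set_fact:
        registered_nodes: >-
          {{ (registered_nodes_output.stdout | from_json)
             | json_query("[] | [?metadata.clusterid=='mycluster']") }}

    - name: Show the names of the selected nodes
      debug:
        msg: "{{ registered_nodes | map(attribute='name') | list }}"
```

The role's actual tasks (see roles/static_inventory/tasks/openstack.yml earlier in this series) additionally split the matched servers into floating-IP and private-IP sets before feeding them to `add_host` and rendering the static hosts file from inventory.j2.
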
Signed-off-by: Bogdan Dobrelya --- .../provisioning/openstack/INVENTORY-LICENSE.txt | 674 --------------------- playbooks/provisioning/openstack/README.md | 19 +- .../openstack/openstack_dns_records.yml | 50 +- .../provisioning/openstack/openstack_dns_views.yml | 18 +- playbooks/provisioning/openstack/pre_tasks.yml | 6 - .../sample-inventory/group_vars/OSEv3.yml | 8 +- .../provisioning/openstack/sample-inventory/hosts | 44 -- .../openstack/sample-inventory/openstack.py | 252 -------- roles/common/defaults/main.yml | 6 + roles/dns-server-detect/tasks/main.yml | 20 +- roles/openstack-stack/meta/main.yml | 3 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 8 +- roles/static_inventory/meta/main.yml | 3 + roles/static_inventory/templates/inventory.j2 | 10 +- 14 files changed, 80 insertions(+), 1041 deletions(-) delete mode 100644 playbooks/provisioning/openstack/INVENTORY-LICENSE.txt delete mode 100644 playbooks/provisioning/openstack/sample-inventory/hosts delete mode 100755 playbooks/provisioning/openstack/sample-inventory/openstack.py create mode 100644 roles/common/defaults/main.yml create mode 100644 roles/openstack-stack/meta/main.yml create mode 100644 roles/static_inventory/meta/main.yml diff --git a/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt b/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt deleted file mode 100644 index 94a9ed024..000000000 --- a/playbooks/provisioning/openstack/INVENTORY-LICENSE.txt +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. 
- - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. 
If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. 
- - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index a92bc8837..d5b7c53ee 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -112,6 +112,9 @@ The `openstack_num_masters`, `openstack_num_infra` and `openstack_num_nodes` values specify the number of Master, Infra and App nodes to create. +The `openshift_cluster_node_labels` defines custom labels for your openshift +cluster node groups, like app or infra nodes. For example: `{'region': 'infra'}`. + The `openstack_nodes_to_remove` allows you to specify the numerical indexes of App nodes that should be removed; for example, ['0', '2'], @@ -141,18 +144,6 @@ be the case for development environments. When turned off, the servers will be provisioned omitting the ``yum update`` command. This brings security implications though, and is not recommended for production deployments. 
-### Update the DNS names in `inventory/hosts` - -The different server groups are currently grouped by the domain name, -so if you end up using a different domain than -`openshift.example.com`, you will need to update the `inventory/hosts` -file. - -For example, if your final domain is `my.cloud.com`, you can run this -command to fix update the `hosts` file: - - sed -i 's/openshift.example.com/my.cloud.com/' inventory/hosts - ### Configure the OpenShift parameters Finally, you need to update the DNS entry in @@ -193,6 +184,4 @@ Once it succeeds, you can install openshift by running: ## License As the rest of the openshift-ansible-contrib repository, the code here is -licensed under Apache 2. However, the openstack.py file under -`sample-inventory` is GPLv3+. See the INVENTORY-LICENSE.txt file for the full -text of the license. +licensed under Apache 2. diff --git a/playbooks/provisioning/openstack/openstack_dns_records.yml b/playbooks/provisioning/openstack/openstack_dns_records.yml index b32b70ba9..b5f0840c5 100644 --- a/playbooks/provisioning/openstack/openstack_dns_records.yml +++ b/playbooks/provisioning/openstack/openstack_dns_records.yml @@ -1,7 +1,7 @@ --- - name: "Generate list of private A records" set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['openstack']['private_v4'] } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" with_items: "{{ groups['cluster_hosts'] }}" - name: "Set the private DNS server to use the external value (if provided)" @@ -10,36 +10,36 @@ nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['private'] is defined + - external_nsupdate_keys is defined + - external_nsupdate_keys['private'] is defined - name: "Set the private DNS server to use the provisioned value" set_fact: - nsupdate_server_private: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" + nsupdate_server_private: "{{ hostvars[groups['dns'][0]].public_v4 }}" nsupdate_key_secret_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_secret }}" nsupdate_key_algorithm_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_algorithm }}" when: - - nsupdate_server_private is undefined + - nsupdate_server_private is undefined - name: "Generate the private Add section for DNS" set_fact: private_named_records: - - view: "private" - zone: "{{ full_dns_domain }}" - server: "{{ nsupdate_server_private }}" - key_name: "{{ ( 'private-' + full_dns_domain ) }}" - key_secret: "{{ nsupdate_key_secret_private }}" - key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" - entries: "{{ private_records }}" + - view: "private" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_private }}" + key_name: "{{ ( 'private-' + full_dns_domain ) }}" + key_secret: "{{ nsupdate_key_secret_private }}" + key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" + entries: "{{ private_records }}" - name: "Generate list of public A records" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['openstack']['public_v4'] } ] }}" + 
public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" with_items: "{{ groups['cluster_hosts'] }}" - name: "Add wildcard records to the public A records" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['openstack']['public_v4'] } ] }}" + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" with_items: "{{ groups['infra_hosts'] }}" - name: "Set the public DNS server details to use the external value (if provided)" @@ -48,27 +48,27 @@ nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['public'] is defined + - external_nsupdate_keys is defined + - external_nsupdate_keys['public'] is defined - name: "Set the public DNS server details to use the provisioned value" set_fact: - nsupdate_server_public: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" + nsupdate_server_public: "{{ hostvars[groups['dns'][0]].public_v4 }}" nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" when: - - nsupdate_server_public is undefined + - nsupdate_server_public is undefined - name: "Generate the public Add section for DNS" set_fact: public_named_records: - - view: "public" - zone: "{{ full_dns_domain }}" - server: "{{ nsupdate_server_public }}" - key_name: "{{ ( 'public-' + full_dns_domain ) }}" - key_secret: "{{ nsupdate_key_secret_public }}" - key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" - entries: "{{ public_records }}" + - view: "public" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_public }}" + key_name: "{{ ( 'public-' + full_dns_domain ) }}" + key_secret: "{{ nsupdate_key_secret_public }}" + key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" + entries: "{{ public_records }}" - name: "Generate the final dns_records_add" set_fact: diff --git a/playbooks/provisioning/openstack/openstack_dns_views.yml b/playbooks/provisioning/openstack/openstack_dns_views.yml index ea0a7cb96..7165b4269 100644 --- a/playbooks/provisioning/openstack/openstack_dns_views.yml +++ b/playbooks/provisioning/openstack/openstack_dns_views.yml @@ -1,24 +1,24 @@ --- - name: "Generate ACL list for DNS server" set_fact: - acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['openstack']['private_v4'] + '/32') ] }}" + acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['private_v4'] + '/32') ] }}" with_items: "{{ groups['cluster_hosts'] }}" - name: "Generate the private view" set_fact: private_named_view: - - name: "private" - acl_entry: "{{ acl_list }}" - zone: - - dns_domain: "{{ full_dns_domain }}" + - name: "private" + acl_entry: "{{ acl_list }}" + zone: + - dns_domain: "{{ full_dns_domain }}" - name: "Generate the public view" set_fact: public_named_view: - - name: "public" - zone: - - dns_domain: "{{ full_dns_domain }}" - forwarder: "{{ public_dns_nameservers }}" + - name: "public" + zone: + - dns_domain: "{{ full_dns_domain }}" + forwarder: "{{ public_dns_nameservers }}" - name: "Generate the final named_config_views" 
set_fact: diff --git a/playbooks/provisioning/openstack/pre_tasks.yml b/playbooks/provisioning/openstack/pre_tasks.yml index a4ff7c4ac..d73945644 100644 --- a/playbooks/provisioning/openstack/pre_tasks.yml +++ b/playbooks/provisioning/openstack/pre_tasks.yml @@ -15,12 +15,6 @@ env_id: "{{ env_id | default(default_env_id) }}" delegate_to: localhost -- name: Set Dynamic Inventory Filters - become: false - shell: > - export OS_INV_FILTER_KEY=clusterid && export OS_INV_FILTER_VALUE={{ env_id }} - delegate_to: localhost - - name: Updating DNS domain to include env_id (if not empty) set_fact: full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' + public_dns_domain) }}" diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 4ce96a031..a16c1d867 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -5,9 +5,11 @@ openshift_release: 1.5.1 #openshift_release: v3.5 openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" -# NOTE(shadower): do not remove this line, otherwise the default node labels -# won't be set up. -openshift_node_labels: "{{ openstack.metadata.node_labels }}" +#openshift_cluster_node_labels: +# app: +# region: primary +# infra: +# region: infra osm_default_node_selector: 'region=primary' diff --git a/playbooks/provisioning/openstack/sample-inventory/hosts b/playbooks/provisioning/openstack/sample-inventory/hosts deleted file mode 100644 index 5f73b60f6..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/hosts +++ /dev/null @@ -1,44 +0,0 @@ -#[all:vars] -# For all group_vars, see ./group_vars/all.yml - -# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups. -# The lb group lets Ansible configure HAProxy as the load balancing solution. -# Comment lb out if your load balancer is pre-configured. -[cluster_hosts:children] -OSEv3 -dns - -[OSEv3:children] -masters -nodes -etcd - -# Set variables common for all OSEv3 hosts -#[OSEv3:vars] - -# For OSEv3 normal group vars, see ./group_vars/OSEv3.yml - -# Host Groups - -[masters:children] -masters.openshift.example.com - -[etcd:children] -etcd.openshift.example.com - -[nodes:children] -masters -infra.openshift.example.com -nodes.openshift.example.com - -[infra_hosts:children] -infra.openshift.example.com - -[dns:children] -dns.openshift.example.com - -[masters.openshift.example.com] -[etcd.openshift.example.com] -[infra.openshift.example.com] -[nodes.openshift.example.com] -[dns.openshift.example.com] diff --git a/playbooks/provisioning/openstack/sample-inventory/openstack.py b/playbooks/provisioning/openstack/sample-inventory/openstack.py deleted file mode 100755 index 8de73e1e0..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/openstack.py +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2012, Marco Vito Moscaritolo -# Copyright (c) 2013, Jesse Keating -# Copyright (c) 2015, Hewlett-Packard Development Company, L.P. -# Copyright (c) 2016, Rackspace Australia -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -# The OpenStack Inventory module uses os-client-config for configuration. -# https://github.com/stackforge/os-client-config -# This means it will either: -# - Respect normal OS_* environment variables like other OpenStack tools -# - Read values from a clouds.yaml file. -# If you want to configure via clouds.yaml, you can put the file in: -# - Current directory -# - ~/.config/openstack/clouds.yaml -# - /etc/openstack/clouds.yaml -# - /etc/ansible/openstack.yml -# The clouds.yaml file can contain entries for multiple clouds and multiple -# regions of those clouds. If it does, this inventory module will connect to -# all of them and present them as one contiguous inventory. -# -# See the adjacent openstack.yml file for an example config file -# There are two ansible inventory specific options that can be set in -# the inventory section. -# expand_hostvars controls whether or not the inventory will make extra API -# calls to fill out additional information about each server -# use_hostnames changes the behavior from registering every host with its UUID -# and making a group of its hostname to only doing this if the -# hostname in question has more than one server -# fail_on_errors causes the inventory to fail and return no hosts if one cloud -# has failed (for example, bad credentials or being offline). -# When set to False, the inventory will return hosts from -# whichever other clouds it can contact. 
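For context, the sample dynamic-inventory script removed here is driven by such a `clouds.yaml`; a minimal sketch (cloud name, credentials and URLs are placeholders) could look like:

```
clouds:
  mycloud:
    auth:
      auth_url: https://keystone.example.com:5000/v2.0
      username: openshift
      password: secret
      project_name: openshift
ansible:
  use_hostnames: True        # register hosts by name rather than UUID
  expand_hostvars: False     # skip the extra per-server API calls
  fail_on_errors: True       # abort if any configured cloud is unreachable
```

The optional `ansible` section carries the three inventory options described in these comments and is only honoured when the installed shade exposes `extra_config`, which the script itself checks before using it.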
(Default: True) - -import argparse -import collections -import os -import sys -import time -from distutils.version import StrictVersion - -try: - import json -except ImportError: - import simplejson as json - -import os_client_config -import shade -import shade.inventory - -CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml'] - - -def get_groups_from_server(server_vars, namegroup=True): - groups = [] - - region = server_vars['region'] - cloud = server_vars['cloud'] - metadata = server_vars.get('metadata', {}) - - # Create a group for the cloud - groups.append(cloud) - - # Create a group on region - groups.append(region) - - # And one by cloud_region - groups.append("%s_%s" % (cloud, region)) - - # Check if group metadata key in servers' metadata - if 'group' in metadata: - groups.append(metadata['group']) - - for extra_group in metadata.get('groups', '').split(','): - if extra_group: - groups.append(extra_group.strip()) - - groups.append('instance-%s' % server_vars['id']) - if namegroup: - groups.append(server_vars['name']) - - for key in ('flavor', 'image'): - if 'name' in server_vars[key]: - groups.append('%s-%s' % (key, server_vars[key]['name'])) - - for key, value in iter(metadata.items()): - groups.append('meta-%s_%s' % (key, value)) - - az = server_vars.get('az', None) - if az: - # Make groups for az, region_az and cloud_region_az - groups.append(az) - groups.append('%s_%s' % (region, az)) - groups.append('%s_%s_%s' % (cloud, region, az)) - return groups - - -def get_host_groups(inventory, refresh=False): - (cache_file, cache_expiration_time) = get_cache_settings() - if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): - groups = to_json(get_host_groups_from_cloud(inventory)) - open(cache_file, 'w').write(groups) - else: - groups = open(cache_file, 'r').read() - return groups - - -def append_hostvars(hostvars, groups, key, server, namegroup=False): - hostvars[key] = dict( - ansible_ssh_host=server['interface_ip'], - openshift_hostname=server['name'], - openshift_public_hostname=server['name'], - openstack=server) - for group in get_groups_from_server(server, namegroup=namegroup): - groups[group].append(key) - - -def get_host_groups_from_cloud(inventory): - groups = collections.defaultdict(list) - firstpass = collections.defaultdict(list) - hostvars = {} - list_args = {} - if hasattr(inventory, 'extra_config'): - use_hostnames = inventory.extra_config['use_hostnames'] - list_args['expand'] = inventory.extra_config['expand_hostvars'] - if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"): - list_args['fail_on_cloud_config'] = \ - inventory.extra_config['fail_on_errors'] - else: - use_hostnames = False - - for server in inventory.list_hosts(**list_args): - - if 'interface_ip' not in server: - continue - try: - if server["metadata"][os.environ['OS_INV_FILTER_KEY']] == os.environ['OS_INV_FILTER_VALUE']: - firstpass[server['name']].append(server) - except Exception: - firstpass[server['name']].append(server) - for name, servers in firstpass.items(): - if len(servers) == 1 and use_hostnames: - append_hostvars(hostvars, groups, name, servers[0]) - else: - server_ids = set() - # Trap for duplicate results - for server in servers: - server_ids.add(server['id']) - if len(server_ids) == 1 and use_hostnames: - append_hostvars(hostvars, groups, name, servers[0]) - else: - for server in servers: - append_hostvars( - hostvars, groups, server['id'], server, - namegroup=True) - groups['_meta'] = {'hostvars': hostvars} - return groups - - -def 
is_cache_stale(cache_file, cache_expiration_time, refresh=False): - ''' Determines if cache file has expired, or if it is still valid ''' - if refresh: - return True - if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0: - mod_time = os.path.getmtime(cache_file) - current_time = time.time() - if (mod_time + cache_expiration_time) > current_time: - return False - return True - - -def get_cache_settings(): - config = os_client_config.config.OpenStackConfig( - config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES) - # For inventory-wide caching - cache_expiration_time = config.get_cache_expiration_time() - cache_path = config.get_cache_path() - if not os.path.exists(cache_path): - os.makedirs(cache_path) - cache_file = os.path.join(cache_path, 'ansible-inventory.cache') - return (cache_file, cache_expiration_time) - - -def to_json(in_dict): - return json.dumps(in_dict, sort_keys=True, indent=2) - - -def parse_args(): - parser = argparse.ArgumentParser(description='OpenStack Inventory Module') - parser.add_argument('--private', - action='store_true', - help='Use private address for ansible host') - parser.add_argument('--refresh', action='store_true', - help='Refresh cached information') - parser.add_argument('--debug', action='store_true', default=False, - help='Enable debug output') - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('--list', action='store_true', - help='List active servers') - group.add_argument('--host', help='List details about the specific host') - - return parser.parse_args() - - -def main(): - args = parse_args() - try: - config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES - shade.simple_logging(debug=args.debug) - inventory_args = dict( - refresh=args.refresh, - config_files=config_files, - private=args.private, - ) - if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): - inventory_args.update(dict( - config_key='ansible', - config_defaults={ - 'use_hostnames': False, - 'expand_hostvars': True, - 'fail_on_errors': True, - } - )) - - inventory = shade.inventory.OpenStackInventory(**inventory_args) - - if args.list: - output = get_host_groups(inventory, refresh=args.refresh) - elif args.host: - output = to_json(inventory.get_host(args.host)) - print(output) - except shade.OpenStackCloudException as e: - sys.stderr.write('%s\n' % str(e)) - sys.exit(1) - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/roles/common/defaults/main.yml b/roles/common/defaults/main.yml new file mode 100644 index 000000000..8db591374 --- /dev/null +++ b/roles/common/defaults/main.yml @@ -0,0 +1,6 @@ +--- +openshift_cluster_node_labels: + app: + region: primary + infra: + region: infra diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml index 183c0a0ca..cd775814f 100644 --- a/roles/dns-server-detect/tasks/main.yml +++ b/roles/dns-server-detect/tasks/main.yml @@ -2,35 +2,35 @@ - fail: msg: 'Missing required private DNS server(s)' when: - - external_nsupdate_keys['private'] is undefined - - hostvars[groups['dns'][0]] is undefined + - external_nsupdate_keys['private'] is undefined + - hostvars[groups['dns'][0]] is undefined - fail: msg: 'Missing required public DNS server(s)' when: - - external_nsupdate_keys['public'] is undefined - - hostvars[groups['dns'][0]] is undefined + - external_nsupdate_keys['public'] is undefined + - hostvars[groups['dns'][0]] is undefined - name: "Set the private DNS server to use the external value (if provided)" set_fact: 
private_dns_server: "{{ external_nsupdate_keys['private']['server'] }}" when: - - external_nsupdate_keys['private'] is defined + - external_nsupdate_keys['private'] is defined - name: "Set the private DNS server to use the provisioned value" set_fact: - private_dns_server: "{{ hostvars[groups['dns'][0]].openstack.private_v4 }}" + private_dns_server: "{{ hostvars[groups['dns'][0]].private_v4 }}" when: - - private_dns_server is undefined + - private_dns_server is undefined - name: "Set the public DNS server to use the external value (if provided)" set_fact: public_dns_server: "{{ external_nsupdate_keys['public']['server'] }}" when: - - external_nsupdate_keys['public'] is defined + - external_nsupdate_keys['public'] is defined - name: "Set the public DNS server to use the provisioned value" set_fact: - public_dns_server: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}" + public_dns_server: "{{ hostvars[groups['dns'][0]].public_v4 }}" when: - - public_dns_server is undefined + - public_dns_server is undefined diff --git a/roles/openstack-stack/meta/main.yml b/roles/openstack-stack/meta/main.yml new file mode 100644 index 000000000..fdda41bb3 --- /dev/null +++ b/roles/openstack-stack/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: common diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 992f6257b..54941db06 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -610,7 +610,9 @@ resources: type: node subtype: app node_labels: - region: primary +{% for k, v in openshift_cluster_node_labels.app.iteritems() %} + {{ k|e }}: {{ v|e }} +{% endfor %} image: {{ openstack_image }} flavor: {{ node_flavor }} key_name: {{ ssh_public_key }} @@ -654,7 +656,9 @@ resources: type: node subtype: infra node_labels: - region: infra +{% for k, v in openshift_cluster_node_labels.infra.iteritems() %} + {{ k|e }}: {{ v|e }} +{% endfor %} image: {{ openstack_image }} flavor: {{ infra_flavor }} key_name: {{ ssh_public_key }} diff --git a/roles/static_inventory/meta/main.yml b/roles/static_inventory/meta/main.yml new file mode 100644 index 000000000..fdda41bb3 --- /dev/null +++ b/roles/static_inventory/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: common diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index ba806f286..9d129cf10 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -19,6 +19,11 @@ #[all:vars] # For all group_vars, see ./group_vars/all.yml +[infra_hosts:vars] +openshift_node_labels={{ openshift_cluster_node_labels.infra | to_json | quote }} + +[app:vars] +openshift_node_labels={{ openshift_cluster_node_labels.app | to_json | quote }} # Create an OSEv3 group that contains the master, nodes, etcd, and lb groups. # The lb group lets Ansible configure HAProxy as the load balancing solution. 
@@ -28,7 +33,6 @@ OSEv3 dns [OSEv3:children] -masters nodes etcd @@ -54,6 +58,9 @@ nodes.{{ stack_name }} [infra_hosts:children] infra.{{ stack_name }} +[app:children] +nodes.{{ stack_name }} + [dns:children] dns.{{ stack_name }} @@ -62,6 +69,7 @@ dns.{{ stack_name }} [etcd.{{ stack_name }}] [infra.{{ stack_name }}] [nodes.{{ stack_name }}] +[app.{{ stack_name }}] [dns.{{ stack_name }}] # BEGIN Autogenerated groups -- cgit v1.2.3 From e7a7d1642c1ffbfe23cd5ad2d920e842f0cae4b2 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Thu, 20 Jul 2017 16:53:05 +0200 Subject: Static inventory autogeneration (#550) * At the provisioning stage, allow users to auto-generate a static inventory w/o manual steps needed. The alternative to go fully dynamic TBD. * Move openshift pre-install playbook to the post provision playbook, where the second part of the pre install tasks is already placed. Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 46 ++++++++++++-- .../openstack/post-provision-openstack.yml | 72 ++++++++++++++-------- .../provisioning/openstack/provision-openstack.yml | 19 ++---- playbooks/provisioning/openstack/provision.yaml | 2 - .../openstack/sample-inventory/group_vars/all.yml | 11 ++++ 5 files changed, 101 insertions(+), 49 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index d5b7c53ee..0b0382834 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -93,8 +93,9 @@ steps, and the Neutron subnet for the Heat stack is updated to point to that server in the end. So the provisioned servers will start using it natively as a default nameserver that comes from the NetworkManager and cloud-init. -`openstack_ssh_key` is a Nova keypair -- you can see your keypairs with -`openstack keypair list`. +`openstack_ssh_key` is a Nova keypair - you can see your keypairs with +`openstack keypair list`. This guide assumes that its corresponding private +key is `~/.ssh/openshift`, stored on the ansible admin (control) node. `openstack_default_image_name` is the name of the Glance image the servers will use. You can @@ -127,6 +128,14 @@ The `required_packages` variable also provides a list of the additional prerequisite packages to be installed before to deploy an OpenShift cluster. Those are ignored though, if the `manage_packages: False`. +The `openstack_inventory` controls either a static inventory will be created after the +cluster nodes provisioned on OpenStack cloud. Note, the fully dynamic inventory +is yet to be supported, so the static inventory will be created anyway. + +The `openstack_inventory_path` points the directory to host the generated static inventory. +It should point to the copied example inventory directory, otherwise ti creates +a new one for you. + #### Security notes Configure required `*_ingress_cidr` variables to restrict public access @@ -164,21 +173,48 @@ variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`: origin_release: 1.5.1 openshift_deployment_type: "{{ deployment_type }}" +### Configure static inventory + +Example inventory variables: + + openstack_private_ssh_key: ~/.ssh/openshift + openstack_inventory: static + openstack_inventory_path: ../../../../inventory + + +In this guide, the latter points to the current directory, where you run ansible commands +from. 
+ +To verify nodes connectivity, use the command: + + ansible -v -i inventory/hosts -m ping all + +If something is broken, double-check the inventory variables, paths and the +generated `/hosts` file. + +The `inventory: dynamic` can be used instead to access cluster nodes directly via +floating IPs. In this mode you can not use a bastion node and should specify +the dynamic inventory file in your ansible commands , like `-i openstack.py`. + ## Deployment ### Run the playbook Assuming your OpenStack (Keystone) credentials are in the `keystonerc` -file, this is how you stat the provisioning process: +this is how you stat the provisioning process from your ansible control node: . keystonerc - ansible-playbook -i inventory --timeout 30 --private-key ~/.ssh/openshift openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml + ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml + +Note, here you start with an empty inventory. The static inventory will be populated +with data so you can omit providing additional arguments for future ansible commands. + ### Install OpenShift Once it succeeds, you can install openshift by running: - ansible-playbook --user openshift --private-key ~/.ssh/openshift -i inventory/ openshift-ansible/playbooks/byo/config.yml + ansible-playbook openshift-ansible/playbooks/byo/config.yml ## License diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index 53db5061c..a807c4d2f 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -1,72 +1,90 @@ --- +- hosts: cluster_hosts + name: Wait for the the nodes to come up + become: False + gather_facts: False + tasks: + - wait_for_connection: + +- hosts: cluster_hosts + gather_facts: True + tasks: + - name: Debug hostvar + debug: + msg: "{{ hostvars[inventory_hostname] }}" + verbosity: 2 + +- name: OpenShift Pre-Requisites (part 1) + include: pre-install.yml + - name: Assign hostnames hosts: cluster_hosts gather_facts: False become: true pre_tasks: - - include: pre_tasks.yml + - include: pre_tasks.yml roles: - - role: hostnames + - role: hostnames - name: Subscribe DNS Host to allow for configuration below hosts: dns gather_facts: False become: true roles: - - role: subscription-manager - when: hostvars.localhost.rhsm_register|default(False) - tags: 'subscription-manager' + - role: subscription-manager + when: hostvars.localhost.rhsm_register|default(False) + tags: 'subscription-manager' - name: Determine which DNS server(s) to use for our generated records hosts: localhost gather_facts: False become: False roles: - - dns-server-detect + - dns-server-detect - name: Build the DNS Server Views and Configure DNS Server(s) hosts: dns gather_facts: False become: true pre_tasks: - - include: pre_tasks.yml - - name: "Generate dns-server views" - include: openstack_dns_views.yml + - include: pre_tasks.yml + - name: "Generate dns-server views" + include: openstack_dns_views.yml roles: - - role: infra-ansible/roles/dns-server + - role: infra-ansible/roles/dns-server - name: Build and process DNS Records hosts: localhost - gather_facts: False + gather_facts: True become: False pre_tasks: - - include: pre_tasks.yml - - name: "Generate dns records" - include: openstack_dns_records.yml + - include: pre_tasks.yml + - name: "Generate dns records" + include: openstack_dns_records.yml roles: - - role: 
infra-ansible/roles/dns + - role: infra-ansible/roles/dns - name: Switch the stack subnet to the configured private DNS server hosts: localhost gather_facts: False become: False vars_files: - - stack_params.yaml + - stack_params.yaml tasks: - - include_role: - name: openstack-stack - tasks_from: subnet_update_dns_servers + - include_role: + name: openstack-stack + tasks_from: subnet_update_dns_servers -- name: OpenShift Pre-Requisites +- name: OpenShift Pre-Requisites (part 2) hosts: OSEv3 gather_facts: true become: true pre_tasks: - - name: "Include DNS configuration to ensure proper name resolution" - lineinfile: - state: present - dest: /etc/sysconfig/network - regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" - line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" + - name: "Include DNS configuration to ensure proper name resolution" + lineinfile: + state: present + dest: /etc/sysconfig/network + regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" + line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" roles: - - node-network-manager + - node-network-manager diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index 0c673af2f..0cac37aaf 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -8,6 +8,10 @@ - include: pre_tasks.yml roles: - role: openstack-stack + - role: static_inventory + when: openstack_inventory|default('static') == 'static' + inventory_path: "{{ openstack_inventory_path|default(inventory_dir) }}" + private_ssh_key: "{{ openstack_private_ssh_key|default('~/.ssh/id_rsa') }}" - name: Refresh Server inventory hosts: localhost @@ -17,19 +21,4 @@ tasks: - meta: refresh_inventory -- hosts: cluster_hosts - name: Wait for the the nodes to come up - become: False - gather_facts: False - tasks: - - wait_for_connection: - -- hosts: cluster_hosts - gather_facts: True - tasks: - - name: Debug hostvar - debug: - msg: "{{ hostvars[inventory_hostname] }}" - verbosity: 2 - - include: post-provision-openstack.yml diff --git a/playbooks/provisioning/openstack/provision.yaml b/playbooks/provisioning/openstack/provision.yaml index 92b6d3356..474c9c803 100644 --- a/playbooks/provisioning/openstack/provision.yaml +++ b/playbooks/provisioning/openstack/provision.yaml @@ -2,5 +2,3 @@ - include: "prerequisites.yml" - include: "provision-openstack.yml" - -- include: "pre-install.yml" diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index f1cdff86a..9eb36ab13 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -60,3 +60,14 @@ ansible_user: openshift # # Use a single security group for a cluster (default: false) #openstack_flat_secgrp: false + +# # Openstack inventory type and cluster nodes access pattern +# # Defaults to 'static'. 
+# # Use 'dynamic' to access cluster nodes directly, via floating IPs +# # and given a dynamic inventory script, like openstack.py +#openstack_inventory: static +# # The path to checkpoint the static inventory from the in-memory one +#openstack_inventory_path: ../../../../inventory + +# # The Nova key-pair's private SSH key to access inventory nodes +#openstack_private_ssh_key: ~/.ssh/openshift -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 76b277f9649d7932ae84a544633e7dd5c5cd12c4 Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Mon, 24 Jul 2017 10:39:22 +0200 Subject: README: Added note about infra-ansible installation (#574) * README in provisioning: note about infra-ansible not updating versions if one exists * README in provisioning: minor change * README: improved readability --- playbooks/provisioning/openstack/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 0b0382834..1ff586b49 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -39,8 +39,12 @@ Alternatively you can install directly from github: ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \ -p openshift-ansible-contrib/roles -Note, this assumes we're in the directory that contains the clonned +Notes: +* This assumes we're in the directory that contains the clonned openshift-ansible-contrib repo in its root path. +* When trying to install a different version, the previous one must be removed first +(`infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)). +Otherwise, even if there are differences between the two versions, installation of the newer version is skipped. ## What does it do -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From cc67080cae89834c1b0a531870b5ee6425b0b2ac Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Tue, 25 Jul 2017 15:17:20 +0200 Subject: Set openshift_hostname explicitly for openstack (#579) This fixes a regression caused by the move to the static inventory. The nodes in `oc get nodes` should be (and had been) identified by their hostnames (e.g. master-0.openshift.example.com), but are now using their internal IP addresses instead. --- roles/static_inventory/templates/inventory.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 9d129cf10..464726a0b 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -12,6 +12,7 @@ %} public_v4={{ hostvars[host]['public_v4'] }}{% endif %} {% if 'ansible_private_key_file' in hostvars[host] %} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %} + openshift_hostname={{ host }} {% endif %} {% endfor %} -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 677fd46cf37cab5f995170b3567939d784ebb07a Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 5 Jul 2017 12:46:57 +0200 Subject: Add bastion and ssh config for the static inventory role * Autogenerate SSH config for static inventory and bastion. * When using bastion, use FQDN for inventory's ansible_host and SSH config's Hostname. Simplifies accessing nodes by names instead of private IPs. 
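Besides being pulled in from provision-openstack.yml, the static_inventory role introduced here can be applied on its own to re-generate the inventory and SSH config. A minimal sketch, assuming `stack_name` is already set to the provisioned Heat stack name and the paths are adjusted to your checkout:

```
- hosts: localhost
  connection: local
  gather_facts: false
  roles:
    - role: static_inventory
      inventory_path: ../../../../inventory
      private_ssh_key: ~/.ssh/openshift
      ssh_config_path: /tmp/ssh.config.ansible
      ssh_user: openshift
      use_bastion: true
      refresh_inventory: true
```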
Signed-off-by: Bogdan Dobrelya --- roles/static_inventory/defaults/main.yml | 13 +++++++++++ roles/static_inventory/tasks/main.yml | 4 ++++ roles/static_inventory/tasks/openstack.yml | 25 ++++++++++++++++++++-- roles/static_inventory/tasks/sshconfig.yml | 13 +++++++++++ roles/static_inventory/templates/inventory.j2 | 4 ++++ .../templates/openstack_ssh_config.j2 | 21 ++++++++++++++++++ 6 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 roles/static_inventory/tasks/sshconfig.yml create mode 100644 roles/static_inventory/templates/openstack_ssh_config.j2 diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml index 315965cde..63de45646 100644 --- a/roles/static_inventory/defaults/main.yml +++ b/roles/static_inventory/defaults/main.yml @@ -4,5 +4,18 @@ refresh_inventory: True inventory: static inventory_path: ~/openstack-inventory +# Either to configure bastion +use_bastion: true + +# SSH user/key/options to access hosts via bastion +ssh_user: openshift +ssh_options: >- + -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no + -o ConnectTimeout=90 -o ControlMaster=auto -o ControlPersist=270s + -o ServerAliveInterval=30 -o GSSAPIAuthentication=no + # SSH key to access nodes private_ssh_key: ~/.ssh/openshift + +# The patch to store the generated config to access bastion/hosts +ssh_config_path: /tmp/ssh.config.ansible diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml index 15c81690e..b58866017 100644 --- a/roles/static_inventory/tasks/main.yml +++ b/roles/static_inventory/tasks/main.yml @@ -4,3 +4,7 @@ - name: Checkpoint in-memory data into a static inventory include: checkpoint.yml + +- name: Generate SSH config for accessing hosts via bastion + include: sshconfig.yml + when: use_bastion|bool diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml index a25502835..95d0d172f 100644 --- a/roles/static_inventory/tasks/openstack.yml +++ b/roles/static_inventory/tasks/openstack.yml @@ -16,12 +16,14 @@ - name: set_fact for openstack inventory nodes set_fact: + registered_bastion_nodes: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}" registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q2) }}" vars: q: "[] | [?metadata.group=='infra.{{stack_name}}']" q2: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4!='']" when: - refresh_inventory|bool + - use_bastion|bool - name: Add cluster nodes w/o floating IPs to inventory with_items: "{{ registered_nodes }}" @@ -29,9 +31,11 @@ add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' - ansible_host: '{{ item.private_v4 }}' + ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.private_v4 }}{% endif %}" ansible_fqdn: '{{ item.name }}' + ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' + ansible_ssh_extra_args: '-F {{ ssh_config_path }}' private_v4: '{{ item.private_v4 }}' - name: Add cluster nodes with floating IPs to inventory @@ -40,8 +44,25 @@ add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' - ansible_host: '{{ item.public_v4 }}' + ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.private_v4 }}{% endif %}" ansible_fqdn: '{{ item.name }}' + ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' + ansible_ssh_extra_args: '-F {{ ssh_config_path }}' private_v4: '{{ item.private_v4 }}' public_v4: '{{ item.public_v4 
}}' + + - name: Add bastion node to inventory + add_host: + name: bastion + groups: bastions + ansible_host: '{{ registered_bastion_nodes[0].public_v4 }}' + ansible_fqdn: '{{ registered_bastion_nodes[0].name }}' + ansible_user: '{{ ssh_user }}' + ansible_private_key_file: '{{ private_ssh_key }}' + ansible_ssh_extra_args: '-F {{ ssh_config_path }}' + private_v4: '{{ registered_bastion_nodes[0].private_v4 }}' + public_v4: '{{ registered_bastion_nodes[0].public_v4 }}' + when: + - registered_bastion_nodes is defined + - use_bastion|bool diff --git a/roles/static_inventory/tasks/sshconfig.yml b/roles/static_inventory/tasks/sshconfig.yml new file mode 100644 index 000000000..7119fe6ff --- /dev/null +++ b/roles/static_inventory/tasks/sshconfig.yml @@ -0,0 +1,13 @@ +--- +- name: set ssh proxy command prefix for accessing nodes via bastion + set_fact: + ssh_proxy_command: >- + ssh {{ ssh_options }} + -i {{ private_ssh_key }} + {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }} + +- name: regenerate ssh config + template: + src: openstack_ssh_config.j2 + dest: "{{ ssh_config_path }}" + mode: 0644 diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 464726a0b..ac74db35c 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -10,8 +10,12 @@ %} private_v4={{ hostvars[host]['private_v4'] }}{% endif %} {% if 'public_v4' in hostvars[host] %} public_v4={{ hostvars[host]['public_v4'] }}{% endif %} +{% if 'ansible_user' in hostvars[host] +%} ansible_user={{ hostvars[host]['ansible_user'] }}{% endif %} {% if 'ansible_private_key_file' in hostvars[host] %} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %} +{% if 'ansible_ssh_extra_args' in hostvars[host] +%} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} openshift_hostname={{ host }} {% endif %} diff --git a/roles/static_inventory/templates/openstack_ssh_config.j2 b/roles/static_inventory/templates/openstack_ssh_config.j2 new file mode 100644 index 000000000..ad5d1253a --- /dev/null +++ b/roles/static_inventory/templates/openstack_ssh_config.j2 @@ -0,0 +1,21 @@ +Host * + IdentitiesOnly yes + +Host bastion + Hostname {{ hostvars['bastion'].ansible_host }} + IdentityFile {{ hostvars['bastion'].ansible_private_key_file }} + User {{ ssh_user }} + StrictHostKeyChecking no + UserKnownHostsFile=/dev/null + +{% for host in groups['all'] | difference(groups['bastions'][0]) %} + +Host {{ host }} + Hostname {{ hostvars[host].ansible_host }} + ProxyCommand {{ ssh_proxy_command }} -W {{ hostvars[host].private_v4 }}:22 + IdentityFile {{ hostvars[host].ansible_private_key_file }} + User {{ ssh_user }} + StrictHostKeyChecking no + UserKnownHostsFile=/dev/null + +{% endfor %} -- cgit v1.2.3 From df8f5f0e251a014ab30dabd62c17e151b7fe36e8 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 12 Jul 2017 13:09:45 +0200 Subject: Options for bastion, SSH config, static inventory autogeneration * At the provisioning stage, allow users to auto-generate SSH config, when using a static inventory. * Run playbooks to provsion and post-provision as a separate, when using a bastion. This re-applies the SSH config, which ansible can't do on the fly. * Support a pre-installed bastion node, colocated with the 1st infra node. * With a bastion enabled, reduce floating IP footprint to infra and dns nodes only, effectively isolating a cluster in a private network. 
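The user-facing switches this change adds boil down to a handful of inventory variables; a condensed group_vars sketch with the sample values (placeholders, not requirements):

```
openstack_use_bastion: true
bastion_ingress_cidr: "{{ openstack_subnet_prefix }}.0/24"
openstack_inventory: static
openstack_inventory_path: ../../../../inventory
openstack_private_ssh_key: ~/.ssh/openshift
openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com
```

When `openstack_use_bastion` is left unset it defaults to False in the playbooks, so servers keep their floating IPs and the separate post-provision run is not required.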
Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 31 ++++- .../openstack/openstack_dns_records.yml | 2 + .../openstack/post-provision-openstack.yml | 6 +- .../provisioning/openstack/provision-openstack.yml | 11 +- .../openstack/sample-inventory/group_vars/all.yml | 7 + playbooks/provisioning/openstack/stack_params.yaml | 1 + roles/openstack-stack/defaults/main.yml | 2 + roles/openstack-stack/tasks/main.yml | 9 +- roles/openstack-stack/templates/heat_stack.yaml.j2 | 25 ++++ .../templates/heat_stack_server_nofloating.yaml.j2 | 149 +++++++++++++++++++++ roles/static_inventory/tasks/openstack.yml | 7 +- roles/static_inventory/templates/inventory.j2 | 5 +- 12 files changed, 238 insertions(+), 17 deletions(-) create mode 100644 roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 1ff586b49..6b9e5a3a9 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -40,7 +40,7 @@ Alternatively you can install directly from github: -p openshift-ansible-contrib/roles Notes: -* This assumes we're in the directory that contains the clonned +* This assumes we're in the directory that contains the clonned openshift-ansible-contrib repo in its root path. * When trying to install a different version, the previous one must be removed first (`infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)). @@ -177,16 +177,30 @@ variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`: origin_release: 1.5.1 openshift_deployment_type: "{{ deployment_type }}" -### Configure static inventory +### Configure static inventory and access via a bastion node Example inventory variables: + openstack_use_bastion: true + bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24" openstack_private_ssh_key: ~/.ssh/openshift openstack_inventory: static openstack_inventory_path: ../../../../inventory + openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com +The `openstack_subnet_prefix` is the openstack private network for your cluster. +And the `bastion_ingress_cidr` defines accepted range for SSH connections to nodes +additionally to the `ssh_ingress_cidr`` (see the security notes above). -In this guide, the latter points to the current directory, where you run ansible commands +The SSH config will be stored on the ansible control node by the +gitven path. Ansible uses it automatically. To access the cluster nodes with +that ssh config, use the `-F` prefix, f.e.: + + ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK + +Note, relative paths will not work for the `openstack_ssh_config_path`, but it +works for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this +guide, the latter points to the current directory, where you run ansible commands from. To verify nodes connectivity, use the command: @@ -194,7 +208,7 @@ To verify nodes connectivity, use the command: ansible -v -i inventory/hosts -m ping all If something is broken, double-check the inventory variables, paths and the -generated `/hosts` file. +generated `/hosts` and `openstack_ssh_config_path` files. The `inventory: dynamic` can be used instead to access cluster nodes directly via floating IPs. 
In this mode you can not use a bastion node and should specify @@ -213,6 +227,15 @@ this is how you stat the provisioning process from your ansible control node: Note, here you start with an empty inventory. The static inventory will be populated with data so you can omit providing additional arguments for future ansible commands. +If bastion enabled, the generates SSH config must be applied for ansible. +Otherwise, it is auto included by the previous step. In order to execute it +as a separate playbook, use the following command: + + ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml + +The first infra node then becomes a bastion node as well and proxies access +for future ansible commands. The post-provision step also configures Satellite, +if requested, and DNS server, and ensures other OpenShift requirements to be met. ### Install OpenShift diff --git a/playbooks/provisioning/openstack/openstack_dns_records.yml b/playbooks/provisioning/openstack/openstack_dns_records.yml index b5f0840c5..980221ed6 100644 --- a/playbooks/provisioning/openstack/openstack_dns_records.yml +++ b/playbooks/provisioning/openstack/openstack_dns_records.yml @@ -36,11 +36,13 @@ set_fact: public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" with_items: "{{ groups['cluster_hosts'] }}" + when: hostvars[item]['public_v4'] is defined - name: "Add wildcard records to the public A records" set_fact: public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" with_items: "{{ groups['infra_hosts'] }}" + when: hostvars[item]['public_v4'] is defined - name: "Set the public DNS server details to use the external value (if provided)" set_fact: diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index a807c4d2f..c7df74a87 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -4,7 +4,11 @@ become: False gather_facts: False tasks: - - wait_for_connection: + - when: not openstack_use_bastion|default(False)|bool + wait_for_connection: + - when: openstack_use_bastion|default(False)|bool + delegate_to: bastion + wait_for_connection: - hosts: cluster_hosts gather_facts: True diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index 0cac37aaf..6ec944d56 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -12,13 +12,20 @@ when: openstack_inventory|default('static') == 'static' inventory_path: "{{ openstack_inventory_path|default(inventory_dir) }}" private_ssh_key: "{{ openstack_private_ssh_key|default('~/.ssh/id_rsa') }}" + ssh_config_path: "{{ openstack_ssh_config_path|default('/tmp/ssh.config.openshift.ansible' + '.' 
+ stack_name) }}" + ssh_user: "{{ ansible_user }}" -- name: Refresh Server inventory +- name: Refresh Server inventory or exit to apply SSH config hosts: localhost connection: local become: False gather_facts: False tasks: - - meta: refresh_inventory + - name: Exit to apply SSH config for a bastion + meta: end_play + when: openstack_use_bastion|default(False)|bool + - name: Refresh Server inventory + meta: refresh_inventory - include: post-provision-openstack.yml + when: not openstack_use_bastion|default(False)|bool diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 9eb36ab13..6d07f9b56 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -69,5 +69,12 @@ ansible_user: openshift # # The path to checkpoint the static inventory from the in-memory one #openstack_inventory_path: ../../../../inventory +# # Use bastion node to access cluster nodes (Defaults to False). +# # Requires a static inventory. +#openstack_use_bastion: False +#bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24" +# # # The Nova key-pair's private SSH key to access inventory nodes #openstack_private_ssh_key: ~/.ssh/openshift +# # The path for the SSH config to access all nodes +#openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.{{ env_id }}.{{ public_dns_domain }} diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml index 9c0b09b45..c3a42ab06 100644 --- a/playbooks/provisioning/openstack/stack_params.yaml +++ b/playbooks/provisioning/openstack/stack_params.yaml @@ -21,3 +21,4 @@ master_volume_size: "{{ docker_volume_size }}" app_volume_size: "{{ docker_volume_size }}" infra_volume_size: "{{ docker_volume_size }}" nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" +use_bastion: "{{ openstack_use_bastion|default(False) }}" diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 4831d6bc4..803a96389 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -4,6 +4,7 @@ ssh_ingress_cidr: 0.0.0.0/0 node_ingress_cidr: 0.0.0.0/0 master_ingress_cidr: 0.0.0.0/0 lb_ingress_cidr: 0.0.0.0/0 +bastion_ingress_cidr: 0.0.0.0/0 num_etcd: 0 num_masters: 1 num_nodes: 1 @@ -11,3 +12,4 @@ num_dns: 1 num_infra: 1 nodes_to_remove: [] etcd_volume_size: 2 +use_bastion: False diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index a53e6350b..9b4855294 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -8,7 +8,6 @@ - name: set template paths set_fact: stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" - server_template_path: "{{ stack_template_pre.path }}/server.yaml" user_data_template_path: "{{ stack_template_pre.path }}/user-data" - name: generate HOT stack template from jinja2 template @@ -19,7 +18,13 @@ - name: generate HOT server template from jinja2 template template: src: heat_stack_server.yaml.j2 - dest: "{{ server_template_path }}" + dest: "{{ stack_template_pre.path }}/server.yaml" + +- name: generate HOT server w/o floating IPs template from jinja2 template + template: + src: heat_stack_server_nofloating.yaml.j2 + dest: "{{ stack_template_pre.path }}/server_nofloating.yaml" + when: use_bastion|bool - name: generate user_data from jinja2 
template template: diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 54941db06..524f466ff 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -156,6 +156,13 @@ resources: port_range_min: 22 port_range_max: 22 remote_ip_prefix: {{ ssh_ingress_cidr }} +{% if use_bastion|bool %} + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ bastion_ingress_cidr }} +{% endif %} - direction: ingress protocol: icmp remote_ip_prefix: {{ ssh_ingress_cidr }} @@ -458,7 +465,11 @@ resources: properties: count: {{ num_etcd }} resource_def: +{% if use_bastion|bool %} + type: server_nofloating.yaml +{% else %} type: server.yaml +{% endif %} properties: name: str_replace: @@ -483,7 +494,9 @@ resources: secgrp: - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - { get_resource: common-secgrp } +{% if not use_bastion|bool %} floating_network: {{ external_network }} +{% endif %} net_name: str_replace: template: openshift-ansible-cluster_id-net @@ -540,7 +553,11 @@ resources: properties: count: {{ num_masters }} resource_def: +{% if use_bastion|bool %} + type: server_nofloating.yaml +{% else %} type: server.yaml +{% endif %} properties: name: str_replace: @@ -573,7 +590,9 @@ resources: {% endif %} {% endif %} - { get_resource: common-secgrp } +{% if not use_bastion|bool %} floating_network: {{ external_network }} +{% endif %} net_name: str_replace: template: openshift-ansible-cluster_id-net @@ -590,7 +609,11 @@ resources: removal_policies: - resource_list: {{ nodes_to_remove }} resource_def: +{% if use_bastion|bool %} + type: server_nofloating.yaml +{% else %} type: server.yaml +{% endif %} properties: name: str_replace: @@ -621,7 +644,9 @@ resources: secgrp: - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - { get_resource: common-secgrp } +{% if not use_bastion|bool %} floating_network: {{ external_network }} +{% endif %} net_name: str_replace: template: openshift-ansible-cluster_id-net diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 new file mode 100644 index 000000000..792a8b90c --- /dev/null +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -0,0 +1,149 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster server w/o floating IP + +parameters: + + name: + type: string + label: Name + description: Name + + group: + type: string + label: Host Group + description: The Primary Ansible Host Group + default: host + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + 
description: Net name + + subnet: + type: string + label: Subnet ID + description: Subnet resource + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group resources + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. + default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + node_labels: + type: json + description: OpenShift Node Labels + default: {"region": "default" } + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server_nofloating, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server_nofloating + - addresses + - { get_param: net_name } + - 0 + - addr + +resources: + + server_nofloating: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: { get_resource: port } + user_data: + get_file: user-data + user_data_format: RAW + metadata: + group: { get_param: group } + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + node_labels: { get_param: node_labels } + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } + fixed_ips: + - subnet: { get_param: subnet } + security_groups: { get_param: secgrp } + + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server_nofloating } + mountpoint: /dev/sdb diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml index 95d0d172f..499adf08c 100644 --- a/roles/static_inventory/tasks/openstack.yml +++ b/roles/static_inventory/tasks/openstack.yml @@ -23,11 +23,9 @@ q2: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4!='']" when: - refresh_inventory|bool - - use_bastion|bool - name: Add cluster nodes w/o floating IPs to inventory - with_items: "{{ registered_nodes }}" - when: not item in registered_nodes_floating + with_items: "{{ registered_nodes|difference(registered_nodes_floating) }}" add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' @@ -40,11 +38,10 @@ - name: Add cluster nodes with floating IPs to inventory with_items: "{{ registered_nodes_floating }}" - when: item in registered_nodes_floating add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' - ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.private_v4 }}{% endif %}" + ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.public_v4 }}{% endif %}" ansible_fqdn: '{{ item.name }}' ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index ac74db35c..24dc9d4a8 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -14,9 +14,8 @@ %} ansible_user={{ hostvars[host]['ansible_user'] }}{% endif %} {% if 'ansible_private_key_file' in hostvars[host] %} ansible_private_key_file={{ 
hostvars[host]['ansible_private_key_file'] }}{% endif %} -{% if 'ansible_ssh_extra_args' in hostvars[host] -%} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} - openshift_hostname={{ host }} +{% if use_bastion|bool and 'ansible_ssh_extra_args' in hostvars[host] +%} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} openshift_hostname={{ host }} {% endif %} {% endfor %} -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From dbc08f8ee57c939ce20bc681150aa6ed4de46b50 Mon Sep 17 00:00:00 2001 From: Dan Jurgensmeyer Date: Wed, 26 Jul 2017 09:34:39 -0600 Subject: Add wildcard pointer to Private DNS --- playbooks/provisioning/openstack/openstack_dns_records.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/playbooks/provisioning/openstack/openstack_dns_records.yml b/playbooks/provisioning/openstack/openstack_dns_records.yml index b5f0840c5..1e2ee3fe3 100644 --- a/playbooks/provisioning/openstack/openstack_dns_records.yml +++ b/playbooks/provisioning/openstack/openstack_dns_records.yml @@ -4,6 +4,11 @@ private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" with_items: "{{ groups['cluster_hosts'] }}" +- name: "Add wildcard records to the private A records for infrahosts" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['openstack']['private_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + - name: "Set the private DNS server to use the external value (if provided)" set_fact: nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From b3459298c194b51a83289eeda5be79ef818b788f Mon Sep 17 00:00:00 2001 From: Dan Jurgensmeyer Date: Wed, 26 Jul 2017 17:16:55 -0600 Subject: removed openstack --- playbooks/provisioning/openstack/openstack_dns_records.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/openstack_dns_records.yml b/playbooks/provisioning/openstack/openstack_dns_records.yml index 1e2ee3fe3..9ec37fdb4 100644 --- a/playbooks/provisioning/openstack/openstack_dns_records.yml +++ b/playbooks/provisioning/openstack/openstack_dns_records.yml @@ -6,7 +6,7 @@ - name: "Add wildcard records to the private A records for infrahosts" set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['openstack']['private_v4'] } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" with_items: "{{ groups['infra_hosts'] }}" - name: "Set the private DNS server to use the external value (if provided)" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 56bd0c0417b4a5d79a106a0aed771a4ca477d572 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Fri, 28 Jul 2017 16:44:36 +0200 Subject: Note about jmespath requirement for control node (#599) Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 1 + playbooks/provisioning/openstack/prerequisites.yml | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 6b9e5a3a9..8e99dd14b 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -10,6 +10,7 @@ etc.). The result is an environment ready for openshift-ansible. * [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps) * [jinja2](http://jinja.pocoo.org/docs/2.9/) * [shade](https://pypi.python.org/pypi/shade) +* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath) * python-dns / [dnspython](https://pypi.python.org/pypi/dnspython) * Become (sudo) is not required. diff --git a/playbooks/provisioning/openstack/prerequisites.yml b/playbooks/provisioning/openstack/prerequisites.yml index 71a99fc82..dd4f980b2 100644 --- a/playbooks/provisioning/openstack/prerequisites.yml +++ b/playbooks/provisioning/openstack/prerequisites.yml @@ -20,6 +20,16 @@ that: 'shade_result.rc == 0' msg: "Python module shade is not installed" + # Check jmespath + - name: Try to import python module shade + command: python -c "import jmespath" + ignore_errors: yes + register: jmespath_result + - name: Check if jmespath is installed + assert: + that: 'jmespath_result.rc == 0' + msg: "Python module jmespath is not installed" + # Check python-dns - name: Try to import python DNS module command: python -c "import dns" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 5820aa4371aec8218426cdceab3360c6955fe018 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Wed, 2 Aug 2017 14:40:08 +0000 Subject: Moving common DNS roles out of the playbook area (#605) --- .../openstack/openstack_dns_records.yml | 82 ---------------------- .../provisioning/openstack/openstack_dns_views.yml | 25 ------- .../openstack/post-provision-openstack.yml | 6 +- roles/dns-records/tasks/main.yml | 82 ++++++++++++++++++++++ roles/dns-views/tasks/main.yml | 25 +++++++ 5 files changed, 109 insertions(+), 111 deletions(-) delete mode 100644 playbooks/provisioning/openstack/openstack_dns_records.yml delete mode 100644 playbooks/provisioning/openstack/openstack_dns_views.yml create mode 100644 roles/dns-records/tasks/main.yml create mode 100644 roles/dns-views/tasks/main.yml diff --git a/playbooks/provisioning/openstack/openstack_dns_records.yml b/playbooks/provisioning/openstack/openstack_dns_records.yml deleted file mode 100644 index 3672a8ea6..000000000 --- a/playbooks/provisioning/openstack/openstack_dns_records.yml +++ /dev/null @@ -1,82 +0,0 @@ ---- -- name: "Generate list of private A records" - set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] 
}}" - with_items: "{{ groups['cluster_hosts'] }}" - -- name: "Add wildcard records to the private A records for infrahosts" - set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" - with_items: "{{ groups['infra_hosts'] }}" - -- name: "Set the private DNS server to use the external value (if provided)" - set_fact: - nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" - nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" - nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" - when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['private'] is defined - -- name: "Set the private DNS server to use the provisioned value" - set_fact: - nsupdate_server_private: "{{ hostvars[groups['dns'][0]].public_v4 }}" - nsupdate_key_secret_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_secret }}" - nsupdate_key_algorithm_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_algorithm }}" - when: - - nsupdate_server_private is undefined - -- name: "Generate the private Add section for DNS" - set_fact: - private_named_records: - - view: "private" - zone: "{{ full_dns_domain }}" - server: "{{ nsupdate_server_private }}" - key_name: "{{ ( 'private-' + full_dns_domain ) }}" - key_secret: "{{ nsupdate_key_secret_private }}" - key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" - entries: "{{ private_records }}" - -- name: "Generate list of public A records" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" - with_items: "{{ groups['cluster_hosts'] }}" - when: hostvars[item]['public_v4'] is defined - -- name: "Add wildcard records to the public A records" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" - with_items: "{{ groups['infra_hosts'] }}" - when: hostvars[item]['public_v4'] is defined - -- name: "Set the public DNS server details to use the external value (if provided)" - set_fact: - nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" - nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" - nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" - when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['public'] is defined - -- name: "Set the public DNS server details to use the provisioned value" - set_fact: - nsupdate_server_public: "{{ hostvars[groups['dns'][0]].public_v4 }}" - nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" - nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" - when: - - nsupdate_server_public is undefined - -- name: "Generate the public Add section for DNS" - set_fact: - public_named_records: - - view: "public" - zone: "{{ full_dns_domain }}" - server: "{{ nsupdate_server_public }}" - key_name: "{{ ( 'public-' + full_dns_domain ) }}" - key_secret: "{{ nsupdate_key_secret_public }}" - key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" - entries: "{{ public_records }}" - -- name: "Generate the final dns_records_add" - set_fact: - dns_records_add: "{{ private_named_records + public_named_records }}" diff --git a/playbooks/provisioning/openstack/openstack_dns_views.yml b/playbooks/provisioning/openstack/openstack_dns_views.yml deleted file mode 100644 index 7165b4269..000000000 --- a/playbooks/provisioning/openstack/openstack_dns_views.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: "Generate ACL list for DNS server" - set_fact: - acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['private_v4'] + '/32') ] }}" - with_items: "{{ groups['cluster_hosts'] }}" - -- name: "Generate the private view" - set_fact: - private_named_view: - - name: "private" - acl_entry: "{{ acl_list }}" - zone: - - dns_domain: "{{ full_dns_domain }}" - -- name: "Generate the public view" - set_fact: - public_named_view: - - name: "public" - zone: - - dns_domain: "{{ full_dns_domain }}" - forwarder: "{{ public_dns_nameservers }}" - -- name: "Generate the final named_config_views" - set_fact: - named_config_views: "{{ private_named_view + public_named_view }}" diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index c7df74a87..f683b77be 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -52,9 +52,8 @@ become: true pre_tasks: - include: pre_tasks.yml - - name: "Generate dns-server views" - include: openstack_dns_views.yml roles: + - role: dns-views - role: infra-ansible/roles/dns-server - name: Build and process DNS Records @@ -63,9 +62,8 @@ become: False pre_tasks: - include: pre_tasks.yml - - name: "Generate dns records" - include: openstack_dns_records.yml roles: + - role: dns-records - role: infra-ansible/roles/dns - name: Switch the stack subnet to the configured private DNS server diff --git a/roles/dns-records/tasks/main.yml b/roles/dns-records/tasks/main.yml new file mode 100644 index 000000000..3672a8ea6 --- /dev/null +++ b/roles/dns-records/tasks/main.yml @@ -0,0 +1,82 @@ +--- +- name: 
"Generate list of private A records" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +- name: "Add wildcard records to the private A records for infrahosts" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + +- name: "Set the private DNS server to use the external value (if provided)" + set_fact: + nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" + nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" + nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['private'] is defined + +- name: "Set the private DNS server to use the provisioned value" + set_fact: + nsupdate_server_private: "{{ hostvars[groups['dns'][0]].public_v4 }}" + nsupdate_key_secret_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_secret }}" + nsupdate_key_algorithm_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_algorithm }}" + when: + - nsupdate_server_private is undefined + +- name: "Generate the private Add section for DNS" + set_fact: + private_named_records: + - view: "private" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_private }}" + key_name: "{{ ( 'private-' + full_dns_domain ) }}" + key_secret: "{{ nsupdate_key_secret_private }}" + key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" + entries: "{{ private_records }}" + +- name: "Generate list of public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + when: hostvars[item]['public_v4'] is defined + +- name: "Add wildcard records to the public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + when: hostvars[item]['public_v4'] is defined + +- name: "Set the public DNS server details to use the external value (if provided)" + set_fact: + nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" + nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" + nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['public'] is defined + +- name: "Set the public DNS server details to use the provisioned value" + set_fact: + nsupdate_server_public: "{{ hostvars[groups['dns'][0]].public_v4 }}" + nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" + nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" + when: + - nsupdate_server_public is undefined + +- name: "Generate the public Add section for DNS" + set_fact: + public_named_records: + - view: "public" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_public }}" + key_name: "{{ ( 'public-' + full_dns_domain ) }}" + key_secret: "{{ nsupdate_key_secret_public }}" + key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" + entries: "{{ public_records }}" + +- name: "Generate the final dns_records_add" + set_fact: + dns_records_add: "{{ private_named_records + public_named_records }}" diff --git a/roles/dns-views/tasks/main.yml b/roles/dns-views/tasks/main.yml new file mode 100644 index 000000000..7165b4269 --- /dev/null +++ b/roles/dns-views/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: "Generate ACL list for DNS server" + set_fact: + acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['private_v4'] + '/32') ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +- name: "Generate the private view" + set_fact: + private_named_view: + - name: "private" + acl_entry: "{{ acl_list }}" + zone: + - dns_domain: "{{ full_dns_domain }}" + +- name: "Generate the public view" + set_fact: + public_named_view: + - name: "public" + zone: + - dns_domain: "{{ full_dns_domain }}" + forwarder: "{{ public_dns_nameservers }}" + +- name: "Generate the final named_config_views" + set_fact: + named_config_views: "{{ private_named_view + public_named_view }}" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From bc73ea59b62f6b24426171c9dc370ad6509e99a7 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 4 Aug 2017 14:12:05 +0200 Subject: Remove clouds.yaml from sample-inventory With the move to the static inventory, we don't need it anymore so it's now just an unnecessary step in the deployment. Note that the users may still want to use clouds.yaml for openstack credentials instead of sourcing the `OS_*` environment variables, but they can do that at their discression. The reason we had the clouds.yaml here was because the `openstack.py` dynamic inventory used the servers' UUID's as ansible hosts by default and the options we put in caused it to use the hostnames (as desired). 
--- playbooks/provisioning/openstack/README.md | 4 ---- playbooks/provisioning/openstack/sample-inventory/clouds.yaml | 5 ----- 2 files changed, 9 deletions(-) delete mode 100644 playbooks/provisioning/openstack/sample-inventory/clouds.yaml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 8e99dd14b..c7b2ea975 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -63,10 +63,6 @@ Otherwise, even if there are differences between the two versions, installation cp -r openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory inventory -### Copy clouds.yaml - - cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/clouds.yaml clouds.yaml - ### Copy ansible config cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/ansible.cfg ansible.cfg diff --git a/playbooks/provisioning/openstack/sample-inventory/clouds.yaml b/playbooks/provisioning/openstack/sample-inventory/clouds.yaml deleted file mode 100644 index 8182d2995..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/clouds.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -ansible: - use_hostnames: True - expand_hostvars: True - fail_on_errors: True -- cgit v1.2.3 From e4cb854086c845fa301cddaefcba1e3accaa17d8 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 4 Aug 2017 15:26:35 +0200 Subject: Allow using ephemeral volumes for docker storage (#615) For testing cases it's sometimes useful to not create Cinder volumes for the VMs. It can also sometimes be a little faster and more robust (but unfit for production). This adds an option called `ephemeral_volumes` that will use the VM's storage instead of creating volumes when set to true. --- playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml | 5 +++++ roles/openstack-stack/templates/heat_stack_server.yaml.j2 | 2 ++ roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 | 2 ++ 3 files changed, 9 insertions(+) diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 6d07f9b56..8f337546c 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -78,3 +78,8 @@ ansible_user: openshift #openstack_private_ssh_key: ~/.ssh/openshift # # The path for the SSH config to access all nodes #openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.{{ env_id }}.{{ public_dns_domain }} + + +# If you want to use the VM storage instead of Cinder volumes, set this to `true`. +# NOTE: this is for testing only! Your data will be gone once the VM disappears! 
+# ephemeral_volumes: false diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index 5851d3b9b..85af311ec 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -156,6 +156,7 @@ resources: floating_network: { get_param: floating_network } port_id: { get_resource: port } +{% if not ephemeral_volumes|default(false)|bool %} cinder_volume: type: OS::Cinder::Volume properties: @@ -168,3 +169,4 @@ resources: volume_id: { get_resource: cinder_volume } instance_uuid: { get_resource: server } mountpoint: /dev/sdb +{% endif %} diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 index 792a8b90c..a22b7c6d0 100644 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -135,6 +135,7 @@ resources: - subnet: { get_param: subnet } security_groups: { get_param: secgrp } +{% if not ephemeral_volumes|default(false)|bool %} cinder_volume: type: OS::Cinder::Volume properties: @@ -147,3 +148,4 @@ resources: volume_id: { get_resource: cinder_volume } instance_uuid: { get_resource: server_nofloating } mountpoint: /dev/sdb +{% endif %} -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 784443b0d88597b988c3d5c58bc6358f5c73675e Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 15 Aug 2017 17:48:58 +0200 Subject: Support multiple private networks for static inventory (#604) Add openstack_private_network_name to filter by a wanted private network. Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 4 ++++ .../openstack/sample-inventory/group_vars/all.yml | 1 + playbooks/provisioning/openstack/stack_params.yaml | 1 + roles/static_inventory/defaults/main.yml | 2 ++ roles/static_inventory/tasks/openstack.yml | 20 ++++++++++++++++---- 5 files changed, 24 insertions(+), 4 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index c7b2ea975..98c847d88 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -110,6 +110,10 @@ providing external connectivity. It is often called `public`, `external` or `ext-net`. You can see your networks with `openstack network list`. +`openstack_private_network_name` is the name of the private Neutron network +providing admin/control access for ansible. It can be merged with other +cluster networks, there are no special requirements for networking. + The `openstack_num_masters`, `openstack_num_infra` and `openstack_num_nodes` values specify the number of Master, Infra and App nodes to create. 
diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 8f337546c..210caee16 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -7,6 +7,7 @@ openstack_ssh_public_key: "openshift" openstack_default_image_name: "centos7" openstack_default_flavor: "m1.medium" openstack_external_network_name: "public" +#openstack_private_network_name: "openshift-ansible-{{ stack_name }}-net" openstack_num_masters: 1 openstack_num_infra: 1 diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml index c3a42ab06..e8434861b 100644 --- a/playbooks/provisioning/openstack/stack_params.yaml +++ b/playbooks/provisioning/openstack/stack_params.yaml @@ -5,6 +5,7 @@ dns_nameservers: "{{ public_dns_nameservers }}" subnet_prefix: "{{ openstack_subnet_prefix }}" ssh_public_key: "{{ openstack_ssh_public_key }}" openstack_image: "{{ openstack_default_image_name }}" +openstack_private_network: "{{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }}" lb_flavor: "{{ openstack_default_flavor | default('m1.small') }}" etcd_flavor: "{{ openstack_default_flavor | default('m1.small') }}" master_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml index 63de45646..5b8aacf5c 100644 --- a/roles/static_inventory/defaults/main.yml +++ b/roles/static_inventory/defaults/main.yml @@ -19,3 +19,5 @@ private_ssh_key: ~/.ssh/openshift # The patch to store the generated config to access bastion/hosts ssh_config_path: /tmp/ssh.config.ansible + +openstack_private_network: private diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml index 499adf08c..75d0ee6d5 100644 --- a/roles/static_inventory/tasks/openstack.yml +++ b/roles/static_inventory/tasks/openstack.yml @@ -29,12 +29,20 @@ add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' - ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.private_v4 }}{% endif %}" + ansible_host: >- + {% if use_bastion|bool -%} + {{ item.name }} + {%- else -%} + {%- set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} + {{ node[0].addresses[openstack_private_network|quote][0].addr }} + {%- endif %} ansible_fqdn: '{{ item.name }}' ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' ansible_ssh_extra_args: '-F {{ ssh_config_path }}' - private_v4: '{{ item.private_v4 }}' + private_v4: >- + {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} + {{ node[0].addresses[openstack_private_network|quote][0].addr }} - name: Add cluster nodes with floating IPs to inventory with_items: "{{ registered_nodes_floating }}" @@ -46,7 +54,9 @@ ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' ansible_ssh_extra_args: '-F {{ ssh_config_path }}' - private_v4: '{{ item.private_v4 }}' + private_v4: >- + {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} + {{ node[0].addresses[openstack_private_network|quote][0].addr }} public_v4: '{{ item.public_v4 }}' - name: Add bastion node to inventory @@ -58,7 +68,9 @@ ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' ansible_ssh_extra_args: 
'-F {{ ssh_config_path }}' - private_v4: '{{ registered_bastion_nodes[0].private_v4 }}' + private_v4: >- + {% set node = registered_nodes | json_query("[?name=='" + registered_bastion_nodes[0].name + "']") -%} + {{ node[0].addresses[openstack_private_network|quote][0].addr }} public_v4: '{{ registered_bastion_nodes[0].public_v4 }}' when: - registered_bastion_nodes is defined -- cgit v1.2.3 From 3d9676911df8eb0fc4ce03c5ccfab049b430f87b Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Tue, 15 Aug 2017 19:17:59 +0200 Subject: Specify different image names for roles (#637) * all.yml: set up new variables for specifying images for roles * stack_params.yaml: add image name variables for different roles * more roles added * heat_stack.yaml.j2: openstack_image changed to updated image names * README: updated documentation for specifying image names --- playbooks/provisioning/openstack/README.md | 9 ++++++--- .../openstack/sample-inventory/group_vars/all.yml | 12 +++++++++++- playbooks/provisioning/openstack/stack_params.yaml | 6 ++++++ roles/openstack-stack/templates/heat_stack.yaml.j2 | 12 ++++++------ 4 files changed, 29 insertions(+), 10 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 98c847d88..216205947 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -98,9 +98,12 @@ as a default nameserver that comes from the NetworkManager and cloud-init. `openstack keypair list`. This guide assumes that its corresponding private key is `~/.ssh/openshift`, stored on the ansible admin (control) node. -`openstack_default_image_name` is the name of the Glance image the -servers will use. You can -see your images with `openstack image list`. +`openstack_default_image_name` is the default name of the Glance image the +servers will use. You can see your images with `openstack image list`. +In order to set a different image for a role, uncomment the line with the +corresponding variable (e.g. `openstack_lb_image_name` for load balancer) and +set its value to another available image name. `openstack_default_image_name` +must stay defined as it is used as a default value for the rest of the roles. `openstack_default_flavor` is the Nova flavor the servers will use. You can see your flavors with `openstack flavor list`. 
diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 210caee16..8cb913cec 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -4,11 +4,21 @@ public_dns_domain: "example.com" public_dns_nameservers: [] openstack_ssh_public_key: "openshift" -openstack_default_image_name: "centos7" openstack_default_flavor: "m1.medium" openstack_external_network_name: "public" #openstack_private_network_name: "openshift-ansible-{{ stack_name }}-net" +# # Used Images +# # - set specific images for roles by uncommenting corresponding lines +# # - note: do not remove openstack_default_image_name definition +#openstack_master_image_name: "centos7" +#openstack_infra_image_name: "centos7" +#openstack_node_image_name: "centos7" +#openstack_lb_image_name: "centos7" +#openstack_etcd_image_name: "centos7" +#openstack_dns_image_name: "centos7" +openstack_default_image_name: "centos7" + openstack_num_masters: 1 openstack_num_infra: 1 openstack_num_nodes: 2 diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml index e8434861b..78790e5a6 100644 --- a/playbooks/provisioning/openstack/stack_params.yaml +++ b/playbooks/provisioning/openstack/stack_params.yaml @@ -5,6 +5,12 @@ dns_nameservers: "{{ public_dns_nameservers }}" subnet_prefix: "{{ openstack_subnet_prefix }}" ssh_public_key: "{{ openstack_ssh_public_key }}" openstack_image: "{{ openstack_default_image_name }}" +openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" +openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" +openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" +openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" +openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" +openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" openstack_private_network: "{{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }}" lb_flavor: "{{ openstack_default_flavor | default('m1.small') }}" etcd_flavor: "{{ openstack_default_flavor | default('m1.small') }}" diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 524f466ff..c41bf15be 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -486,7 +486,7 @@ resources: k8s_type: etcds cluster_id: {{ stack_name }} type: etcd - image: {{ openstack_image }} + image: {{ openstack_etcd_image }} flavor: {{ etcd_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } @@ -529,7 +529,7 @@ resources: k8s_type: lb cluster_id: {{ stack_name }} type: lb - image: {{ openstack_image }} + image: {{ openstack_lb_image }} flavor: {{ lb_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } @@ -574,7 +574,7 @@ resources: k8s_type: masters cluster_id: {{ stack_name }} type: master - image: {{ openstack_image }} + image: {{ openstack_master_image }} flavor: {{ master_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } @@ -636,7 +636,7 @@ resources: {% for k, v in openshift_cluster_node_labels.app.iteritems() %} {{ k|e }}: 
{{ v|e }} {% endfor %} - image: {{ openstack_image }} + image: {{ openstack_node_image }} flavor: {{ node_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } @@ -684,7 +684,7 @@ resources: {% for k, v in openshift_cluster_node_labels.infra.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_image }} + image: {{ openstack_infra_image }} flavor: {{ infra_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } @@ -730,7 +730,7 @@ resources: k8s_type: dns cluster_id: {{ stack_name }} type: dns - image: {{ openstack_image }} + image: {{ openstack_dns_image }} flavor: {{ dns_flavor }} key_name: {{ ssh_public_key }} net: { get_resource: net } -- cgit v1.2.3 -- cgit v1.2.3 From 4ddb3fb369008395f8e2dc225cb6e08ca59a115b Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Tue, 15 Aug 2017 20:37:18 +0200 Subject: group_vars/all.yml, stack_params.yaml, README: specifying flavors enabled and documented (#638) --- playbooks/provisioning/openstack/README.md | 6 +++++- .../openstack/sample-inventory/group_vars/all.yml | 12 +++++++++++- playbooks/provisioning/openstack/stack_params.yaml | 12 ++++++------ 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 216205947..79e153fe1 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -105,8 +105,12 @@ corresponding variable (e.g. `openstack_lb_image_name` for load balancer) and set its value to another available image name. `openstack_default_image_name` must stay defined as it is used as a default value for the rest of the roles. -`openstack_default_flavor` is the Nova flavor the servers will use. +`openstack_default_flavor` is the default Nova flavor the servers will use. You can see your flavors with `openstack flavor list`. +In order to set a different flavor for a role, uncomment the line with the +corresponding variable (e.g. `openstack_lb_flavor` for load balancer) and +set its value to another available flavor. `openstack_default_flavor` must +stay defined as it is used as a default value for the rest of the roles. `openstack_external_network_name` is the name of the Neutron network providing external connectivity. 
It is often called `public`, diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 8cb913cec..3dd0b3d79 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -4,7 +4,6 @@ public_dns_domain: "example.com" public_dns_nameservers: [] openstack_ssh_public_key: "openshift" -openstack_default_flavor: "m1.medium" openstack_external_network_name: "public" #openstack_private_network_name: "openshift-ansible-{{ stack_name }}-net" @@ -23,6 +22,17 @@ openstack_num_masters: 1 openstack_num_infra: 1 openstack_num_nodes: 2 +# # Used Flavors +# # - set specific flavors for roles by uncommenting corresponding lines +# # - note: do note remove openstack_default_flavor definition +#openstack_master_flavor: "m1.medium" +#openstack_infra_flavor: "m1.medium" +#openstack_node_flavor: "m1.medium" +#openstack_lb_flavor: "m1.medium" +#openstack_etcd_flavor: "m1.medium" +#openstack_dns_flavor: "m1.medium" +openstack_default_flavor: "m1.medium" + # # Numerical index of nodes to remove # openstack_nodes_to_remove: [] diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml index 78790e5a6..6c920d2a2 100644 --- a/playbooks/provisioning/openstack/stack_params.yaml +++ b/playbooks/provisioning/openstack/stack_params.yaml @@ -5,6 +5,12 @@ dns_nameservers: "{{ public_dns_nameservers }}" subnet_prefix: "{{ openstack_subnet_prefix }}" ssh_public_key: "{{ openstack_ssh_public_key }}" openstack_image: "{{ openstack_default_image_name }}" +lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" +etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" +master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" +node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" +infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" +dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" @@ -12,12 +18,6 @@ openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_imag openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" openstack_private_network: "{{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }}" -lb_flavor: "{{ openstack_default_flavor | default('m1.small') }}" -etcd_flavor: "{{ openstack_default_flavor | default('m1.small') }}" -master_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" -node_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" -infra_flavor: "{{ openstack_default_flavor | default('m1.medium') }}" -dns_flavor: "{{ openstack_default_flavor | default('m1.small') }}" external_network: "{{ openstack_external_network_name }}" num_etcd: "{{ openstack_num_etcd | default(0) }}" num_masters: "{{ openstack_num_masters }}" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 6ebad037254b0c254638f6e6dfbd48e451a1ceeb 
Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 16 Aug 2017 09:14:06 +0200 Subject: Access UI via a bastion node (#596) When using a bastion and a single master, use the lb-secgrp to access UI port allowed from the ingress bastion node cidr. For HA (masters>1), UI still should be accessed via the LB node's ingress cidr, omitting the bastion. Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 18 ++++++++++++++++++ playbooks/provisioning/openstack/stack_params.yaml | 1 + roles/openstack-stack/defaults/main.yml | 1 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 20 +++++++++++++++----- roles/static_inventory/defaults/main.yml | 6 ++++++ roles/static_inventory/tasks/main.yml | 7 +++++++ roles/static_inventory/tasks/sshtun.yml | 15 +++++++++++++++ .../static_inventory/templates/ssh-tunnel.service.j2 | 20 ++++++++++++++++++++ 8 files changed, 83 insertions(+), 5 deletions(-) create mode 100644 roles/static_inventory/tasks/sshtun.yml create mode 100644 roles/static_inventory/templates/ssh-tunnel.service.j2 diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 79e153fe1..d7fa76b0f 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -251,6 +251,24 @@ Once it succeeds, you can install openshift by running: ansible-playbook openshift-ansible/playbooks/byo/config.yml +### Access UI + +OpenShift UI may be accessed via the 1st master node FQDN, port 8443. + +When using a bastion, you may want to make an SSH tunnel from your control node +to access UI on the `https://localhost:8443`, with this inventory variable: + + openshift_ui_ssh_tunnel: True + +Note, this requires sudo rights on the ansible control node and an absolute path +for the `openstack_private_ssh_key`. You should also update the control node's +`/etc/hosts`: + + 127.0.0.1 master-0.openshift.example.com + +In order to access UI, the ssh-tunnel service will be created and started on the +control node. Make sure to remove these changes and the service manually, when not +needed anymore. 
## License diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml index 6c920d2a2..8f36d5c4f 100644 --- a/playbooks/provisioning/openstack/stack_params.yaml +++ b/playbooks/provisioning/openstack/stack_params.yaml @@ -29,3 +29,4 @@ app_volume_size: "{{ docker_volume_size }}" infra_volume_size: "{{ docker_volume_size }}" nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" use_bastion: "{{ openstack_use_bastion|default(False) }}" +ui_ssh_tunnel: "{{ openshift_ui_ssh_tunnel|default(False) }}" diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 803a96389..c8529612e 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -13,3 +13,4 @@ num_infra: 1 nodes_to_remove: [] etcd_volume_size: 2 use_bastion: False +ui_ssh_tunnel: False diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c41bf15be..a670ff0e3 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -439,7 +439,7 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% if num_masters > 1 %} +{% if num_masters > 1 or ui_ssh_tunnel|bool %} lb-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -450,14 +450,21 @@ resources: protocol: tcp port_range_min: {{ openshift_master_api_port | default(8443) }} port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr }} - {% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} + remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} +{% if ui_ssh_tunnel|bool %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ ssh_ingress_cidr }} +{% endif %} +{% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} - direction: ingress protocol: tcp port_range_min: {{ openshift_master_console_port | default(8443) }} port_range_max: {{ openshift_master_console_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr }} - {% endif %} + remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} +{% endif %} {% endif %} etcd: @@ -695,6 +702,9 @@ resources: - { get_resource: flat-secgrp } {% else %} - { get_resource: node-secgrp } +{% endif %} +{% if ui_ssh_tunnel|bool and num_masters < 2 %} + - { get_resource: lb-secgrp } {% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml index 5b8aacf5c..871700f8c 100644 --- a/roles/static_inventory/defaults/main.yml +++ b/roles/static_inventory/defaults/main.yml @@ -20,4 +20,10 @@ private_ssh_key: ~/.ssh/openshift # The patch to store the generated config to access bastion/hosts ssh_config_path: /tmp/ssh.config.ansible +# The IP:port to make an SSH tunnel to access UI on the 1st master +# via bastion node (requires sudo on the ansible control node) +ui_ssh_tunnel: False +ui_port: "{{ openshift_master_api_port | default(8443) }}" +target_ip: "{{ hostvars[groups['masters.' 
+ stack_name|quote][0]].private_v4 }}" + openstack_private_network: private diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml index b58866017..24e11beb6 100644 --- a/roles/static_inventory/tasks/main.yml +++ b/roles/static_inventory/tasks/main.yml @@ -8,3 +8,10 @@ - name: Generate SSH config for accessing hosts via bastion include: sshconfig.yml when: use_bastion|bool + +- name: Configure SSH tunneling to access UI + include: sshtun.yml + become: true + when: + - use_bastion|bool + - ui_ssh_tunnel|bool diff --git a/roles/static_inventory/tasks/sshtun.yml b/roles/static_inventory/tasks/sshtun.yml new file mode 100644 index 000000000..b0e4c832c --- /dev/null +++ b/roles/static_inventory/tasks/sshtun.yml @@ -0,0 +1,15 @@ +--- +- name: Create ssh tunnel systemd service + template: + src: ssh-tunnel.service.j2 + dest: /etc/systemd/system/ssh-tunnel.service + mode: 0644 + +- name: reload the systemctl daemon after file update + command: systemctl daemon-reload + +- name: Enable ssh tunnel service + service: + name: ssh-tunnel + enabled: true + state: restarted diff --git a/roles/static_inventory/templates/ssh-tunnel.service.j2 b/roles/static_inventory/templates/ssh-tunnel.service.j2 new file mode 100644 index 000000000..0d1cf8f79 --- /dev/null +++ b/roles/static_inventory/templates/ssh-tunnel.service.j2 @@ -0,0 +1,20 @@ +[Unit] +Description=Set up ssh tunneling for OpenShift cluster UI +After=network.target + +[Service] +ExecStart=/usr/bin/ssh -NT -o \ + ServerAliveInterval=60 -o \ + UserKnownHostsFile=/dev/null -o \ + StrictHostKeyChecking=no -o \ + ExitOnForwardFailure=no -i \ + {{ private_ssh_key }} {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }} \ + -L 0.0.0.0:{{ ui_port }}:{{ target_ip }}:{{ ui_port }} + + +# Restart every >2 seconds to avoid StartLimitInterval failure +RestartSec=5 +Restart=always + +[Install] +WantedBy=multi-user.target -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From d41308f238b1c8dac35682e64f661c2e4b01c317 Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Wed, 16 Aug 2017 11:09:02 +0200 Subject: Set custom hostnames for servers (#643) * README, all.yml, stack_params.yml, heat_stack.yaml.j2: hostname customisation added * hostnames customisation: default set in stack_params * heat_stack: bug fix * fixed commented defaults in group_vars/all.yml --- playbooks/provisioning/openstack/README.md | 4 ++++ .../openstack/sample-inventory/group_vars/all.yml | 9 +++++++++ playbooks/provisioning/openstack/stack_params.yaml | 6 ++++++ roles/openstack-stack/templates/heat_stack.yaml.j2 | 18 ++++++++---------- 4 files changed, 27 insertions(+), 10 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index d7fa76b0f..afaeb430b 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -78,6 +78,10 @@ your servers will be under. With the default values, this will be That sudomain can be set as well by the `openshift_app_domain` variable in the inventory. +The `openstack__hostname` is a set of variables used for customising +hostnames of servers with a given role. When such a variable stays commented, +default hostname (usually the role name) is used. + The `public_dns_nameservers` is a list of DNS servers accessible from all the created Nova servers. These will be serving as your DNS forwarders for external FQDNs that do not belong to the cluster's DNS domain and its subdomains. 
diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 3dd0b3d79..19f916508 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -3,6 +3,15 @@ env_id: "openshift" public_dns_domain: "example.com" public_dns_nameservers: [] +# # Used Hostnames +# # - set custom hostnames for roles by uncommenting corresponding lines +#openstack_master_hostname: "master" +#openstack_infra_hostname: "infra-node" +#openstack_node_hostname: "app-node" +#openstack_lb_hostname: "lb" +#openstack_etcd_hostname: "etcd" +#openstack_dns_hostname: "dns" + openstack_ssh_public_key: "openshift" openstack_external_network_name: "public" #openstack_private_network_name: "openshift-ansible-{{ stack_name }}-net" diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml index 8f36d5c4f..27fa5ec8c 100644 --- a/playbooks/provisioning/openstack/stack_params.yaml +++ b/playbooks/provisioning/openstack/stack_params.yaml @@ -3,6 +3,12 @@ stack_name: "{{ env_id }}.{{ public_dns_domain }}" dns_domain: "{{ public_dns_domain }}" dns_nameservers: "{{ public_dns_nameservers }}" subnet_prefix: "{{ openstack_subnet_prefix }}" +master_hostname: "{{ openstack_master_hostname | default('master') }}" +infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" +node_hostname: "{{ openstack_node_hostname | default('app-node') }}" +lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" +etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" +dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" ssh_public_key: "{{ openstack_ssh_public_key }}" openstack_image: "{{ openstack_default_image_name }}" lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index a670ff0e3..3623035c6 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -483,7 +483,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: etcd + k8s_type: {{ etcd_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -526,7 +526,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: lb + k8s_type: {{ lb_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -571,7 +571,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: master + k8s_type: {{ master_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -624,11 +624,10 @@ resources: properties: name: str_replace: - template: subtype-k8s_type-%index%.cluster_id + template: sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: node - subtype: app + sub_type_k8s_type: {{ node_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -672,11 +671,10 @@ resources: properties: name: str_replace: - template: subtypek8s_type-%index%.cluster_id + template: sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: node - subtype: infra + sub_type_k8s_type: {{ infra_hostname }} cluster_env: {{ public_dns_domain }} 
cluster_id: {{ stack_name }} group: @@ -730,7 +728,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: dns + k8s_type: {{ dns_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 998634ffd25a17ff581a124396fd1183706f2478 Mon Sep 17 00:00:00 2001 From: ioggstream Date: Wed, 16 Aug 2017 16:43:58 +0200 Subject: Avoid server recreation in case of user_data modification. (#651) --- roles/openstack-stack/templates/heat_stack_server.yaml.j2 | 1 + roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 | 1 + 2 files changed, 2 insertions(+) diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index 85af311ec..32fb166f6 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -134,6 +134,7 @@ resources: user_data: get_file: user-data user_data_format: RAW + user_data_update_policy: IGNORE metadata: group: { get_param: group } environment: { get_param: cluster_env } diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 index a22b7c6d0..638fc8b45 100644 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -119,6 +119,7 @@ resources: user_data: get_file: user-data user_data_format: RAW + user_data_update_policy: IGNORE metadata: group: { get_param: group } environment: { get_param: cluster_env } -- cgit v1.2.3 From cd08737844ac12701a3c1f51f527cc9543a94db5 Mon Sep 17 00:00:00 2001 From: Davis Phillips Date: Wed, 16 Aug 2017 15:27:08 -0500 Subject: Adding 3.6 files and modifications --- roles/docker-storage-setup/tasks/main.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 7202bc46b..06bdd1b11 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -6,3 +6,12 @@ owner: root group: root mode: 0644 + +- name: stop docker + service: name=docker state=stopped + +- name: run docker-storage-setup + command: docker-storage-setup + +- name: start docker + service: name=docker state=started -- cgit v1.2.3 From 41346f348bc3e37384f4ae008e51e3ba1fb6b1bd Mon Sep 17 00:00:00 2001 From: Davis Phillips Date: Wed, 16 Aug 2017 15:31:01 -0500 Subject: remove additional docker storage setup --- roles/docker-storage-setup/tasks/main.yaml | 9 --------- 1 file changed, 9 deletions(-) diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 06bdd1b11..7202bc46b 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -6,12 +6,3 @@ owner: root group: root mode: 0644 - -- name: stop docker - service: name=docker state=stopped - -- name: run docker-storage-setup - command: docker-storage-setup - -- name: start docker - service: name=docker state=started -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 6a528d5803619f93c734c23be44a2021f1d35ee9 Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Thu, 17 Aug 2017 13:48:20 +0200 Subject: Configure different Docker volume sizes for different roles 
(#644) * README, all.yml, stack_params.yaml, openstack-stack: added docker volume size customisation - app_volume_size changed to node_volume_size (it is node everywhere else) * all.yml, stack_params.yaml,openstack-stack: added customisation for lb, etcd, dns * README: updated * README: updated info about ephemeral volumes --- playbooks/provisioning/openstack/README.md | 10 ++++++++++ .../provisioning/openstack/sample-inventory/group_vars/all.yml | 9 +++++++++ playbooks/provisioning/openstack/stack_params.yaml | 9 ++++++--- roles/openstack-stack/defaults/main.yml | 3 ++- roles/openstack-stack/templates/heat_stack.yaml.j2 | 4 ++-- 5 files changed, 29 insertions(+), 6 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index afaeb430b..ae572f9b6 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -135,6 +135,16 @@ cluster node groups, like app or infra nodes. For example: `{'region': 'infra'}` The `openstack_nodes_to_remove` allows you to specify the numerical indexes of App nodes that should be removed; for example, ['0', '2'], +The `docker_volume_size` is the default Docker volume size the servers will use. +In order to set a different volume size for a role, +uncomment the line with the corresponding variable (e. g. `docker_master_volume_size` +for master) and change its value. `docker_volume_size` must stay defined as it is +used as a default value for some of the servers (master, infra, app node). +The rest of the roles (etcd, load balancer, dns) have their defaults hard-coded. + +**Note**: If the `ephemeral_volumes` is set to `true`, the `*_volume_size` variables +will be ignored and the deployment will not create any cinder volumes. + The `openstack_flat_secgrp`, controls Neutron security groups creation for Heat stacks. Set it to true, if you experience issues with sec group rules quotas. 
It trades security for number of rules, by sharing the same set diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 19f916508..bdd98d239 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -45,6 +45,15 @@ openstack_default_flavor: "m1.medium" # # Numerical index of nodes to remove # openstack_nodes_to_remove: [] +# # Docker volume size +# # - set specific volume size for roles by uncommenting corresponding lines +# # - note: do not remove docker_default_volume_size definition +#docker_master_volume_size: "15" +#docker_infra_volume_size: "15" +#docker_node_volume_size: "15" +#docker_etcd_volume_size: "2" +#docker_dns_volume_size: "1" +#docker_lb_volume_size: "5" docker_volume_size: "15" openstack_subnet_prefix: "192.168.99" diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml index 27fa5ec8c..60e9bcf45 100644 --- a/playbooks/provisioning/openstack/stack_params.yaml +++ b/playbooks/provisioning/openstack/stack_params.yaml @@ -30,9 +30,12 @@ num_masters: "{{ openstack_num_masters }}" num_nodes: "{{ openstack_num_nodes }}" num_infra: "{{ openstack_num_infra }}" num_dns: "{{ openstack_num_dns | default(1) }}" -master_volume_size: "{{ docker_volume_size }}" -app_volume_size: "{{ docker_volume_size }}" -infra_volume_size: "{{ docker_volume_size }}" +master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" +infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" +node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" +etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}" +dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" +lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" use_bastion: "{{ openstack_use_bastion|default(False) }}" ui_ssh_tunnel: "{{ openshift_ui_ssh_tunnel|default(False) }}" diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index c8529612e..fbca0bdf6 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -1,5 +1,4 @@ --- -dns_volume_size: 1 ssh_ingress_cidr: 0.0.0.0/0 node_ingress_cidr: 0.0.0.0/0 master_ingress_cidr: 0.0.0.0/0 @@ -12,5 +11,7 @@ num_dns: 1 num_infra: 1 nodes_to_remove: [] etcd_volume_size: 2 +dns_volume_size: 1 +lb_volume_size: 5 use_bastion: False ui_ssh_tunnel: False diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 3623035c6..c0da4c184 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -550,7 +550,7 @@ resources: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} - volume_size: 5 + volume_size: {{ lb_volume_size }} depends_on: - interface {% endif %} @@ -658,7 +658,7 @@ resources: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} - volume_size: {{ app_volume_size }} + volume_size: {{ node_volume_size }} depends_on: - interface -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit 
v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From ec07a43c6ac64d220458b688ded7ce3634eeb0d7 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 21 Aug 2017 12:22:54 +0200 Subject: Update openshift_release in the sample inventory (#647) * Update openshift_release in the sample inventory This removes setting the version for Openshift Origin, because the only the latest release is actually available. So if a new Origin release comes up, the installation will fail. --- playbooks/provisioning/openstack/README.md | 1 - playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index ae572f9b6..099b017bb 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -196,7 +196,6 @@ Note, that in order to deploy OpenShift origin, you should update the following variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`: deployment_type: origin - origin_release: 1.5.1 openshift_deployment_type: "{{ deployment_type }}" ### Configure static inventory and access via a bastion node diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index a16c1d867..6ceeff827 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -1,6 +1,5 @@ --- openshift_deployment_type: origin -openshift_release: 1.5.1 #openshift_deployment_type: openshift-enterprise #openshift_release: v3.5 openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" -- cgit v1.2.3 From 603f218f4e7e3ee67188029f9cbb81713111c4ee Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 21 Aug 2017 13:04:49 +0200 Subject: Ignore *.cfg and *.crt in the openstack inventory (#672) This allows our users to keep the ansible.cfg file in the inventory as well as putting e.g. LDAP certificates in. Fixes #481 --- playbooks/provisioning/openstack/sample-inventory/ansible.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg index 1a092ed6b..81d8ae10c 100644 --- a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg +++ b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg @@ -6,7 +6,7 @@ forks = 50 timeout = 30 host_key_checking = false inventory = inventory -inventory_ignore_extensions = secrets.py, .pyc +inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt gathering = smart retry_files_enabled = false fact_caching = jsonfile -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From f4b584fcef4fad12be931631e0c95ac677799ee7 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 16 Aug 2017 11:04:27 +0200 Subject: Add docs and defaults for multi-master setup Additionally, add the lb group to contain lb nodes to the static inventory template. Include the lb group into the OSEv3 group, in order to apply the cluster group vars to it. 
Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 13 +++++++++++++ .../openstack/sample-inventory/group_vars/OSEv3.yml | 4 ++++ roles/static_inventory/templates/inventory.j2 | 5 +++++ 3 files changed, 22 insertions(+) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 099b017bb..358ed182b 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -162,6 +162,19 @@ The `openstack_inventory_path` points the directory to host the generated static It should point to the copied example inventory directory, otherwise ti creates a new one for you. +#### Multi-master configuration + +Please refer to the official documentation for the +[multi-master setup](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#multiple-masters) +and define the corresponding [inventory +variables](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#configuring-cluster-variables) +in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node +under the ansible group named `ext_lb`: + + openshift_master_cluster_method: native + openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}" + openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}" + #### Security notes Configure required `*_ingress_cidr` variables to restrict public access diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 6ceeff827..9d47815ec 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -4,6 +4,10 @@ openshift_deployment_type: origin #openshift_release: v3.5 openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" +openshift_master_cluster_method: native +openshift_master_cluster_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" +openshift_master_cluster_public_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" + #openshift_cluster_node_labels: # app: # region: primary diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 24dc9d4a8..987c98ec6 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -39,6 +39,7 @@ dns [OSEv3:children] nodes etcd +lb # Set variables common for all OSEv3 hosts #[OSEv3:vars] @@ -68,6 +69,9 @@ nodes.{{ stack_name }} [dns:children] dns.{{ stack_name }} +[lb:children] +lb.{{ stack_name }} + # Empty placeholders for all groups of the cluster nodes [masters.{{ stack_name }}] [etcd.{{ stack_name }}] @@ -75,6 +79,7 @@ dns.{{ stack_name }} [nodes.{{ stack_name }}] [app.{{ stack_name }}] [dns.{{ stack_name }}] +[lb.{{ stack_name }}] # BEGIN Autogenerated groups {% for group in groups %} -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From ce9b66f71b60857f644cc5a3559a5c21af5d9b24 Mon Sep 17 00:00:00 2001 From: tzumainn Date: Wed, 23 Aug 2017 09:58:07 -0400 Subject: Add documentation regarding running custom post-provision tasks (#678) * Add documentation regarding running custom post-provision tasks * moved post-provision doc to openstack README * added reference to OSEv3, clarified some text --- playbooks/provisioning/openstack/README.md | 38 
++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 358ed182b..002c2f6aa 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -271,6 +271,44 @@ The first infra node then becomes a bastion node as well and proxies access for future ansible commands. The post-provision step also configures Satellite, if requested, and DNS server, and ensures other OpenShift requirements to be met. +### Running Custom Post-Provision Actions + +If you'd like to run post-provision actions, you can do so by creating a custom playbook. Here's one example that adds additional YUM repositories: + +``` +--- +- hosts: app + tasks: + + # enable EPL + - name: Add repository + yum_repository: + name: epel + description: EPEL YUM repo + baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/ +``` + +This example runs against app nodes. The list of options include: + + - cluster_hosts (all hosts: app, infra, masters, dns, lb) + - OSEv3 (app, infra, masters) + - app + - dns + - masters + - infra_hosts + +After writing your custom playbook, run it like this: + +``` +ansible-playbook --private-key ~/.ssh/openshift -i myinventory/ custom-playbook.yaml +``` + +If you'd like to limit the run to one particular host, you can do so as follows: + +``` +ansible-playbook --private-key ~/.ssh/openshift -i myinventory/ custom-playbook.yaml -l app-node-0.openshift.example.com +``` + ### Install OpenShift Once it succeeds, you can install openshift by running: -- cgit v1.2.3 From 2a0afda0940b63d71f05c0d11834e3b4582f4e90 Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Wed, 23 Aug 2017 16:39:17 +0200 Subject: Fix node label customisation (#679) * node labels: add checks for custom labels - README: add more info about customising labels - pre_tasks: add checks for label values, set to empty dict if undefined - group_vars: move labels customisation from OSEv3 to all * pre_tasks: tried a new approach to updating variables * pre_tasks: variable update fixed * pre_tasks: rollback upscaling changes (to be added in upscaling PR) * pre_tasks: blank line removed * pre_tasks: add check for undefined variable (should not happen though) * pre_tasks: be sure to have regions defined --- playbooks/provisioning/openstack/README.md | 10 +++++++++- playbooks/provisioning/openstack/pre_tasks.yml | 16 ++++++++++++++++ .../openstack/sample-inventory/group_vars/OSEv3.yml | 6 ------ .../openstack/sample-inventory/group_vars/all.yml | 9 +++++++++ 4 files changed, 34 insertions(+), 7 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 002c2f6aa..c9f651032 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -130,7 +130,15 @@ The `openstack_num_masters`, `openstack_num_infra` and App nodes to create. The `openshift_cluster_node_labels` defines custom labels for your openshift -cluster node groups, like app or infra nodes. For example: `{'region': 'infra'}`. +cluster node groups. It currently supports app and infra node groups. +The default value of this variable sets `region: primary` to app nodes and +`region: infra` to infra nodes. 
+An example of setting a customised label: +``` +openshift_cluster_node_labels: + app: + mylabel: myvalue +``` The `openstack_nodes_to_remove` allows you to specify the numerical indexes of App nodes that should be removed; for example, ['0', '2'], diff --git a/playbooks/provisioning/openstack/pre_tasks.yml b/playbooks/provisioning/openstack/pre_tasks.yml index d73945644..be29dad16 100644 --- a/playbooks/provisioning/openstack/pre_tasks.yml +++ b/playbooks/provisioning/openstack/pre_tasks.yml @@ -31,3 +31,19 @@ delegate_to: localhost when: - openshift_master_default_subdomain is undefined + +# Check that openshift_cluster_node_labels has regions defined for all groups +# NOTE(kpilatov): if node labels are to be enabled for more groups, +# this check needs to be modified as well +- name: Set openshift_cluster_node_labels if undefined (should not happen) + set_fact: + openshift_cluster_node_labels: {'app': {'region': 'primary'}, 'infra': {'region': 'infra'}} + when: openshift_cluster_node_labels is not defined + +- name: Set openshift_cluster_node_labels for the infra group + set_fact: + openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'infra': {'region': 'infra'}}, recursive=True) }}" + +- name: Set openshift_cluster_node_labels for the app group + set_fact: + openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'region': 'primary'}}, recursive=True) }}" diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 9d47815ec..4d27ae873 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -8,12 +8,6 @@ openshift_master_cluster_method: native openshift_master_cluster_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" openshift_master_cluster_public_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" -#openshift_cluster_node_labels: -# app: -# region: primary -# infra: -# region: infra - osm_default_node_selector: 'region=primary' # NOTE(shadower): the hostname check seems to always fail because the diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index bdd98d239..4b077be0a 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -122,3 +122,12 @@ ansible_user: openshift # If you want to use the VM storage instead of Cinder volumes, set this to `true`. # NOTE: this is for testing only! Your data will be gone once the VM disappears! # ephemeral_volumes: false + +# # OpenShift node labels +# # - in order to customise node labels for app and/or infra group, set the +# # openshift_cluster_node_labels variable +#openshift_cluster_node_labels: +# app: +# region: primary +# infra: +# region: infra -- cgit v1.2.3 -- cgit v1.2.3 From 7be1f76a53518dd48092a996841971eb4fd43f27 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Thu, 24 Aug 2017 11:03:07 +0200 Subject: Do not repeat pre_tasks for post-provision playbook (#689) Move repeating pre_tasks to pre-install (OpenShift Pre-Requisites) step. 
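Within a play, `pre_tasks` run before any roles, so repeating the include in every post-provision play re-ran the same variable checks several times. The change boils down to replacing those per-play includes with one dedicated play, roughly as sketched here (mirroring the diff that follows):

```
# Run the shared variable checks once, instead of as pre_tasks in every play
- hosts: localhost:cluster_hosts
  become: False
  tasks:
    - include: pre_tasks.yml
```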
Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/post-provision-openstack.yml | 6 ------ playbooks/provisioning/openstack/pre-install.yml | 5 +++++ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index f683b77be..28f3e5fcf 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -25,8 +25,6 @@ hosts: cluster_hosts gather_facts: False become: true - pre_tasks: - - include: pre_tasks.yml roles: - role: hostnames @@ -50,8 +48,6 @@ hosts: dns gather_facts: False become: true - pre_tasks: - - include: pre_tasks.yml roles: - role: dns-views - role: infra-ansible/roles/dns-server @@ -60,8 +56,6 @@ hosts: localhost gather_facts: True become: False - pre_tasks: - - include: pre_tasks.yml roles: - role: dns-records - role: infra-ansible/roles/dns diff --git a/playbooks/provisioning/openstack/pre-install.yml b/playbooks/provisioning/openstack/pre-install.yml index 629182d49..9b49136da 100644 --- a/playbooks/provisioning/openstack/pre-install.yml +++ b/playbooks/provisioning/openstack/pre-install.yml @@ -12,3 +12,8 @@ - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager', ansible_sudo: true } - { role: docker, tags: 'docker' } - { role: openshift-prep, tags: 'openshift-prep' } + +- hosts: localhost:cluster_hosts + become: False + tasks: + - include: pre_tasks.yml -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From ce3be1e039fd3bddf245bdaed83466f12b59937b Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Fri, 25 Aug 2017 09:25:20 +0200 Subject: Cast num_* as int for jinja templates (#685) Signed-off-by: Bogdan Dobrelya --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index c0da4c184..1ecf84aa6 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -439,7 +439,7 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% if num_masters > 1 or ui_ssh_tunnel|bool %} +{% if num_masters|int > 1 or ui_ssh_tunnel|bool %} lb-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -513,7 +513,7 @@ resources: depends_on: - interface -{% if num_masters > 1 %} +{% if num_masters|int > 1 %} loadbalancer: type: OS::Heat::ResourceGroup properties: @@ -592,7 +592,7 @@ resources: {% else %} - { get_resource: master-secgrp } - { get_resource: node-secgrp } -{% if num_etcd == 0 %} +{% if num_etcd|int == 0 %} - { get_resource: etcd-secgrp } {% endif %} {% endif %} @@ -701,7 +701,7 @@ resources: {% else %} - { get_resource: node-secgrp } {% endif %} -{% if ui_ssh_tunnel|bool and num_masters < 2 %} +{% if ui_ssh_tunnel|bool and num_masters|int < 2 %} - { get_resource: lb-secgrp } {% endif %} - { get_resource: infra-secgrp } -- cgit v1.2.3 From 9593ffb85ab6c2b5ee3964d7566932cf9ae768c9 Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Fri, 25 Aug 2017 13:30:02 +0200 Subject: Added checks for configured images and flavors (#688) * prerequisites, custom_*_check: added checking that specified images/flavors are available - uses stack_params as a source of variable value which is then passed to the HOT * minor fixes --- 
.../openstack/custom_flavor_check.yaml | 9 ++++++ .../provisioning/openstack/custom_image_check.yaml | 9 ++++++ playbooks/provisioning/openstack/prerequisites.yml | 32 ++++++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 playbooks/provisioning/openstack/custom_flavor_check.yaml create mode 100644 playbooks/provisioning/openstack/custom_image_check.yaml diff --git a/playbooks/provisioning/openstack/custom_flavor_check.yaml b/playbooks/provisioning/openstack/custom_flavor_check.yaml new file mode 100644 index 000000000..e11874c28 --- /dev/null +++ b/playbooks/provisioning/openstack/custom_flavor_check.yaml @@ -0,0 +1,9 @@ +--- +- name: Try to get flavor facts + os_flavor_facts: + name: "{{ flavor }}" + register: flavor_result +- name: Check that custom flavor is available + assert: + that: "flavor_result.ansible_facts.openstack_flavors" + msg: "Flavor {{ flavor }} is not available." diff --git a/playbooks/provisioning/openstack/custom_image_check.yaml b/playbooks/provisioning/openstack/custom_image_check.yaml new file mode 100644 index 000000000..452e1e4d8 --- /dev/null +++ b/playbooks/provisioning/openstack/custom_image_check.yaml @@ -0,0 +1,9 @@ +--- +- name: Try to get image facts + os_image_facts: + image: "{{ image }}" + register: image_result +- name: Check that custom image is available + assert: + that: "image_result.ansible_facts.openstack_image" + msg: "Image {{ image }} is not available." diff --git a/playbooks/provisioning/openstack/prerequisites.yml b/playbooks/provisioning/openstack/prerequisites.yml index dd4f980b2..a87c06705 100644 --- a/playbooks/provisioning/openstack/prerequisites.yml +++ b/playbooks/provisioning/openstack/prerequisites.yml @@ -84,3 +84,35 @@ assert: that: 'key_result.rc == 0' msg: "Keypair {{ openstack_ssh_public_key }} is not available" + +# Check that custom images and flavors exist +- hosts: localhost + + # Include variables that will be used by heat + vars_files: + - stack_params.yaml + + tasks: + # Check that custom images are available + - include: custom_image_check.yaml + with_items: + - "{{ openstack_master_image }}" + - "{{ openstack_infra_image }}" + - "{{ openstack_node_image }}" + - "{{ openstack_lb_image }}" + - "{{ openstack_etcd_image }}" + - "{{ openstack_dns_image }}" + loop_control: + loop_var: image + + # Check that custom flavors are available + - include: custom_flavor_check.yaml + with_items: + - "{{ master_flavor }}" + - "{{ infra_flavor }}" + - "{{ node_flavor }}" + - "{{ lb_flavor }}" + - "{{ etcd_flavor }}" + - "{{ dns_flavor }}" + loop_control: + loop_var: flavor -- cgit v1.2.3 From 2ea1ccfb37461a70d329655f7eeaaab090f1ca0d Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Fri, 25 Aug 2017 16:15:40 +0200 Subject: Support external/pre-provisioned authoritative cluster DNS (#690) * Document how to use fully external DNS servers w/o provisioning dns servers group with Heat. * Document how to use a mixed servers setup for dynamic records updates mathing public or private views. * Allow custom nsupdate key names for OSP10 dns service compatibility. The osp-dns configures the named service with the fixed key_name 'update-key'. Add optional key_name for the external_nsupdate_keys public section to allow custom key names. 
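The key name, algorithm and secret referenced here are ordinary TSIG credentials, so an external server's willingness to accept them can be checked independently of the playbooks with the `nsupdate` utility. All values below are placeholders; substitute your own server, key secret and zone:

```
# Placeholder key secret and addresses, for illustration only
nsupdate -y 'hmac-md5:update-key:c2VjcmV0Cg=='
> server ns1.example.com
> zone openshift.example.com
> update add nsupdate-test.openshift.example.com 300 A 192.0.2.10
> send
> quit
```

If the update is accepted, the record should be visible via `dig @ns1.example.com nsupdate-test.openshift.example.com +short`; remove it again with `update delete nsupdate-test.openshift.example.com A` once the check is done.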
--- playbooks/provisioning/openstack/README.md | 56 +++++++++++++++++++--- roles/dns-records/tasks/main.yml | 6 ++- roles/openstack-stack/templates/heat_stack.yaml.j2 | 7 +++ 3 files changed, 61 insertions(+), 8 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index c9f651032..2eb9aa9cd 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -53,8 +53,9 @@ Otherwise, even if there are differences between the two versions, installation * Assigns Cinder volumes to the servers * Set up an `openshift` user with sudo privileges * Optionally attach Red Hat subscriptions -* Set up a bind-based DNS server -* When deploying more than one master, set up a HAproxy server +* Sets up a bind-based DNS server or configures the cluster servers to use an external DNS server. +* Supports mixed in-stack/external DNS servers for dynamic updates. +* When deploying more than one master, sets up a HAproxy server ## Set up @@ -69,9 +70,17 @@ Otherwise, even if there are differences between the two versions, installation ### Update `inventory/group_vars/all.yml` +#### DNS configuration variables + Pay special attention to the values in the first paragraph -- these will depend on your OpenStack environment. +Note that the provsisioning playbooks update the original Neutron subnet +created with the Heat stack to point to the configured DNS servers. +So the provisioned cluster nodes will start using those natively as +default nameservers. Technically, this allows to deploy OpenShift clusters +without dnsmasq proxies. + The `env_id` and `public_dns_domain` will form the cluster's DNS domain all your servers will be under. With the default values, this will be `openshift.example.com`. For workloads, the default subdomain is 'apps'. @@ -93,10 +102,45 @@ daemon that in turn proxies DNS requests to the authoritative DNS server. When Network Manager is enabled for provisioned cluster nodes, which is normally the case, you should not change the defaults and always deploy dnsmasq. -Note that the authoritative DNS server is configured on post provsision -steps, and the Neutron subnet for the Heat stack is updated to point to that -server in the end. So the provisioned servers will start using it natively -as a default nameserver that comes from the NetworkManager and cloud-init. +`external_nsupdate_keys` describes an external authoritative DNS server(s) +processing dynamic records updates in the public and private cluster views: + + external_nsupdate_keys: + public: + key_secret: + key_algorithm: 'hmac-md5' + key_name: 'update-key' + server: + private: + key_secret: + key_algorithm: 'hmac-sha256' + server: + +Here, for the public view section, we specified another key algorithm and +optional `key_name`, which normally defaults to the cluster's DNS domain. +This just illustrates a compatibility mode with a DNS service deployed +by OpenShift on OSP10 reference architecture, and used in a mixed mode with +another external DNS server. + +Another example defines an external DNS server for the public view +additionally to the in-stack DNS server used for the private view only: + + external_nsupdate_keys: + public: + key_secret: + key_algorithm: 'hmac-sha256' + server: + +Here, updates matching the public view will be hitting the given public +server IP. While updates matching the private view will be sent to the +auto evaluated in-stack DNS server's **public** IP. 
+ +Note, for the in-stack DNS server, private view updates may be sent only +via the public IP of the server. You can not send updates via the private +IP yet. This forces the in-stack private server to have a floating IP. +See also the [security notes](#security-notes) + +#### Other configuration variables `openstack_ssh_key` is a Nova keypair - you can see your keypairs with `openstack keypair list`. This guide assumes that its corresponding private diff --git a/roles/dns-records/tasks/main.yml b/roles/dns-records/tasks/main.yml index 3672a8ea6..e9bce9718 100644 --- a/roles/dns-records/tasks/main.yml +++ b/roles/dns-records/tasks/main.yml @@ -14,6 +14,7 @@ nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" + nsupdate_private_key_name: "{{ external_nsupdate_keys['private']['key_name']|default('private-' + full_dns_domain) }}" when: - external_nsupdate_keys is defined - external_nsupdate_keys['private'] is defined @@ -32,7 +33,7 @@ - view: "private" zone: "{{ full_dns_domain }}" server: "{{ nsupdate_server_private }}" - key_name: "{{ ( 'private-' + full_dns_domain ) }}" + key_name: "{{ nsupdate_private_key_name|default('private-' + full_dns_domain) }}" key_secret: "{{ nsupdate_key_secret_private }}" key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" entries: "{{ private_records }}" @@ -54,6 +55,7 @@ nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" + nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" when: - external_nsupdate_keys is defined - external_nsupdate_keys['public'] is defined @@ -72,7 +74,7 @@ - view: "public" zone: "{{ full_dns_domain }}" server: "{{ nsupdate_server_public }}" - key_name: "{{ ( 'public-' + full_dns_domain ) }}" + key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" key_secret: "{{ nsupdate_key_secret_public }}" key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" entries: "{{ public_records }}" diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 1ecf84aa6..ea2742a2c 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -54,6 +54,7 @@ outputs: description: Floating IPs of the nodes value: { get_attr: [ infra_nodes, floating_ip ] } +{% if num_dns|int > 0 %} dns_name: description: Name of the DNS value: @@ -68,6 +69,7 @@ outputs: dns_private_ips: description: Private IPs of the DNS value: { get_attr: [ dns, private_ip ] } +{% endif %} resources: @@ -405,6 +407,7 @@ resources: port_range_min: 443 port_range_max: 443 +{% if num_dns|int > 0 %} dns-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -439,6 +442,8 @@ resources: port_range_min: 53 port_range_max: 53 remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" +{% endif %} + {% if num_masters|int > 1 or ui_ssh_tunnel|bool %} lb-secgrp: type: OS::Neutron::SecurityGroup @@ -716,6 +721,7 @@ resources: depends_on: - interface +{% if num_dns|int > 0 %} dns: type: OS::Heat::ResourceGroup properties: @@ -755,3 +761,4 @@ resources: volume_size: {{ dns_volume_size }} 
depends_on: - interface +{% endif %} -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 8008fd49227a750a6a5cf5cae8700f0fe0970bce Mon Sep 17 00:00:00 2001 From: tzumainn Date: Thu, 31 Aug 2017 04:38:38 -0400 Subject: Add custom post-provision playbook for adding yum repos (#697) * Add custom post-provision playbook for adding yum repos * fixed formatting issues * requested corrections and formatting changes --- playbooks/provisioning/openstack/README.md | 26 ++++++++++++++-------- .../openstack/custom-actions/add-yum-repos.yml | 12 ++++++++++ 2 files changed, 29 insertions(+), 9 deletions(-) create mode 100644 playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 2eb9aa9cd..57d5839c8 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -325,7 +325,19 @@ if requested, and DNS server, and ensures other OpenShift requirements to be met ### Running Custom Post-Provision Actions -If you'd like to run post-provision actions, you can do so by creating a custom playbook. Here's one example that adds additional YUM repositories: +A custom playbook can be run like this: + +``` +ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml +``` + +If you'd like to limit the run to one particular host, you can do so as follows: + +``` +ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -l app-node-0.openshift.example.com +``` + +You can also create your own custom playbook. Here's one example that adds additional YUM repositories: ``` --- @@ -349,17 +361,13 @@ This example runs against app nodes. The list of options include: - masters - infra_hosts -After writing your custom playbook, run it like this: +Please consider contributing your custom playbook back to openshift-ansible-contrib! -``` -ansible-playbook --private-key ~/.ssh/openshift -i myinventory/ custom-playbook.yaml -``` +A library of custom post-provision actions exists in `openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions`. Playbooks include: -If you'd like to limit the run to one particular host, you can do so as follows: +##### add-yum-repos.yml -``` -ansible-playbook --private-key ~/.ssh/openshift -i myinventory/ custom-playbook.yaml -l app-node-0.openshift.example.com -``` +[add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml) adds a list of custom yum repositories to every node in the cluster. 
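The playbook added here takes its repository list from the `yum_repos` variable, so besides setting it in `group_vars` you can feed it straight from the command line. An illustrative invocation with example values (reusing the EPEL repository shown earlier):

```
ansible-playbook --private-key ~/.ssh/openshift -i inventory/ \
  openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml \
  -e '{"yum_repos": [{"name": "epel",
                      "description": "EPEL YUM repo",
                      "baseurl": "https://download.fedoraproject.org/pub/epel/$releasever/$basearch/"}]}'
```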
### Install OpenShift diff --git a/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml b/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml new file mode 100644 index 000000000..ffebcb642 --- /dev/null +++ b/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml @@ -0,0 +1,12 @@ +--- +- hosts: cluster_hosts + vars: + yum_repos: [] + tasks: + # enable additional yum repos + - name: Add repository + yum_repository: + name: "{{ item.name }}" + description: "{{ item.description }}" + baseurl: "{{ item.baseurl }}" + with_items: "{{ yum_repos }}" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 06abd17792fafc3adec3916f56c69800690b1431 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 5 Sep 2017 15:56:43 +0200 Subject: Document global DNS security options (#694) * Document global DNS security options Related changes: * Do not create a view if externally managed. * Allow to specify the recursion settings for public/private views defined by the dns-view role. Signed-off-by: Bogdan Dobrelya * Document public_dns_nameservers better Also use it as the private view forwarder Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 14 ++++++++++++++ .../openstack/sample-inventory/group_vars/all.yml | 4 ++++ roles/dns-views/defaults/main.yml | 4 ++++ roles/dns-views/tasks/main.yml | 7 ++++++- 4 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 roles/dns-views/defaults/main.yml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 57d5839c8..b898351e6 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -94,6 +94,8 @@ default hostname (usually the role name) is used. The `public_dns_nameservers` is a list of DNS servers accessible from all the created Nova servers. These will be serving as your DNS forwarders for external FQDNs that do not belong to the cluster's DNS domain and its subdomains. +If you're unsure what to put in here, you can try the google or opendns servers, +but note that some organizations may be blocking them. The `openshift_use_dnsmasq` controls either dnsmasq is deployed or not. By default, dnsmasq is deployed and comes as the hosts' /etc/resolv.conf file @@ -244,6 +246,18 @@ be the case for development environments. When turned off, the servers will be provisioned omitting the ``yum update`` command. This brings security implications though, and is not recommended for production deployments. +##### DNS servers security options + +Aside from `node_ingress_cidr` restricting public access to in-stack DNS +servers, there are following (bind/named specific) DNS security +options available: + + named_public_recursion: 'no' + named_private_recursion: 'yes' + +External DNS servers, which is not included in the 'dns' hosts group, +are not managed. It is up to you to configure such ones. 
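Since `named_public_recursion` and `named_private_recursion` map onto BIND's per-view `recursion` setting, the effect is easy to verify from outside once the in-stack DNS node is up. The hostnames below are examples only:

```
# The public view should refuse recursion, so expect
# ";; WARNING: recursion requested but not available" here:
dig @ns-master.openshift.example.com www.example.org

# Records in the cluster zone must still resolve authoritatively:
dig @ns-master.openshift.example.com master-0.openshift.example.com +short
```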
+ ### Configure the OpenShift parameters Finally, you need to update the DNS entry in diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 4b077be0a..5028141d2 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -92,6 +92,10 @@ rhsm_register: False # key_algorithm: 'hmac-md5' # server: '192.168.1.2' +# # Customize DNS server security options +#named_public_recursion: 'no' +#named_private_recursion: 'yes' + # NOTE(shadower): Do not change this value. The Ansible user is currently # hardcoded to `openshift`. diff --git a/roles/dns-views/defaults/main.yml b/roles/dns-views/defaults/main.yml new file mode 100644 index 000000000..c9f8248af --- /dev/null +++ b/roles/dns-views/defaults/main.yml @@ -0,0 +1,4 @@ +--- +external_nsupdate_keys: {} +named_private_recursion: 'yes' +named_public_recursion: 'no' diff --git a/roles/dns-views/tasks/main.yml b/roles/dns-views/tasks/main.yml index 7165b4269..ffbad2e3f 100644 --- a/roles/dns-views/tasks/main.yml +++ b/roles/dns-views/tasks/main.yml @@ -8,18 +8,23 @@ set_fact: private_named_view: - name: "private" + recursion: "{{ named_private_recursion }}" acl_entry: "{{ acl_list }}" zone: - dns_domain: "{{ full_dns_domain }}" + forwarder: "{{ public_dns_nameservers }}" + when: external_nsupdate_keys['private'] is undefined - name: "Generate the public view" set_fact: public_named_view: - name: "public" + recursion: "{{ named_public_recursion }}" zone: - dns_domain: "{{ full_dns_domain }}" forwarder: "{{ public_dns_nameservers }}" + when: external_nsupdate_keys['public'] is undefined - name: "Generate the final named_config_views" set_fact: - named_config_views: "{{ private_named_view + public_named_view }}" + named_config_views: "{{ private_named_view|default([]) + public_named_view|default([]) }}" -- cgit v1.2.3 From daa0b91119d2c16860a19b4ead2d0d128f8bc5ce Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 6 Sep 2017 10:24:16 +0200 Subject: Allow using a provider network (#701) * Allow using a provider network This adds a new option `openstack_provider_network_name` which will take a name of an existing network and put the servers there. It will also prevent creating floating IP addresses as the provider network's IPs should already be accessible without any additional routing required. Fixes #622 * Requested changes Don't fail on external/private networks and use role defaults for the provider network. 
* Add missing endif --- playbooks/provisioning/openstack/README.md | 18 ++++ playbooks/provisioning/openstack/prerequisites.yml | 2 + .../openstack/sample-inventory/group_vars/all.yml | 6 ++ playbooks/provisioning/openstack/stack_params.yaml | 10 +- roles/openstack-stack/defaults/main.yml | 1 + .../tasks/subnet_update_dns_servers.yaml | 1 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 113 +++++++++++++++------ .../templates/heat_stack_server.yaml.j2 | 12 +++ roles/static_inventory/tasks/openstack.yml | 25 ++++- 9 files changed, 153 insertions(+), 35 deletions(-) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index b898351e6..4e74627dc 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -229,6 +229,24 @@ under the ansible group named `ext_lb`: openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}" openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}" +#### Provider Network + +Normally, the playbooks create a new Neutron network and subnet and attach +floating IP addresses to each node. If you have a provider network set up, this +is all unnecessary as you can just access servers that are placed in the +provider network directly. + +To use a provider network, set its name in `openstack_provider_network_name` in +`inventory/group_vars/all.yml`. + +If you set the provider network name, the `openstack_external_network_name` and +`openstack_private_network_name` fields will be ignored. + +**NOTE**: this will not update the nodes' DNS, so running openshift-ansible +right after provisioning will fail (unless you're using an external DNS server +your provider network knows about). You must make sure your nodes are able to +resolve each other by name. + #### Security notes Configure required `*_ingress_cidr` variables to restrict public access diff --git a/playbooks/provisioning/openstack/prerequisites.yml b/playbooks/provisioning/openstack/prerequisites.yml index a87c06705..f2f720f8b 100644 --- a/playbooks/provisioning/openstack/prerequisites.yml +++ b/playbooks/provisioning/openstack/prerequisites.yml @@ -65,10 +65,12 @@ os_networks_facts: name: "{{ openstack_external_network_name }}" register: network_result + when: not openstack_provider_network_name|default(None) - name: Check that network is available assert: that: "network_result.ansible_facts.openstack_networks" msg: "Network {{ openstack_external_network_name }} is not available" + when: not openstack_provider_network_name|default(None) # Check keypair # TODO kpilatov: there is no Ansible module for getting OS keypairs diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 5028141d2..0e198342c 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -16,6 +16,12 @@ openstack_ssh_public_key: "openshift" openstack_external_network_name: "public" #openstack_private_network_name: "openshift-ansible-{{ stack_name }}-net" +## If you want to use a provider network, set its name here. +## NOTE: the `openstack_external_network_name` and +## `openstack_private_network_name` options will be ignored when using a +## provider network. 
+#openstack_provider_network_name: "provider" + # # Used Images # # - set specific images for roles by uncommenting corresponding lines # # - note: do not remove openstack_default_image_name definition diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml index 60e9bcf45..484c06889 100644 --- a/playbooks/provisioning/openstack/stack_params.yaml +++ b/playbooks/provisioning/openstack/stack_params.yaml @@ -23,8 +23,14 @@ openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_ openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" -openstack_private_network: "{{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }}" -external_network: "{{ openstack_external_network_name }}" +openstack_private_network: >- + {% if openstack_provider_network_name | default(None) -%} + {{ openstack_provider_network_name }} + {%- else -%} + {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} + {%- endif -%} +provider_network: "{{ openstack_provider_network_name | default(None) }}" +external_network: "{{ openstack_external_network_name | default(None) }}" num_etcd: "{{ openstack_num_etcd | default(0) }}" num_masters: "{{ openstack_num_masters }}" num_nodes: "{{ openstack_num_nodes }}" diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index fbca0bdf6..c16b5dc00 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -15,3 +15,4 @@ dns_volume_size: 1 lb_volume_size: 5 use_bastion: False ui_ssh_tunnel: False +provider_network: None diff --git a/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml index be4f07b97..af28fc98f 100644 --- a/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml +++ b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml @@ -6,3 +6,4 @@ state: present use_default_subnetpool: yes dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}" + when: not provider_network diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index ea2742a2c..b6b5e3613 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -73,6 +73,7 @@ outputs: resources: +{% if not provider_network %} net: type: OS::Neutron::Net properties: @@ -129,6 +130,8 @@ resources: router_id: { get_resource: router } subnet_id: { get_resource: subnet } +{% endif %} + # keypair: # type: OS::Nova::KeyPair # properties: @@ -501,22 +504,29 @@ resources: image: {{ openstack_etcd_image }} flavor: {{ etcd_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } - secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - - { get_resource: common-secgrp } -{% if not use_bastion|bool %} - floating_network: {{ external_network }} -{% endif %} net_name: str_replace: template: 
openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + - { get_resource: common-secgrp } +{% if not use_bastion|bool and not provider_network %} + floating_network: {{ external_network }} +{% endif %} volume_size: {{ etcd_volume_size }} +{% if not provider_network %} depends_on: - interface +{% endif %} {% if num_masters|int > 1 %} loadbalancer: @@ -544,20 +554,29 @@ resources: image: {{ openstack_lb_image }} flavor: {{ lb_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } - secgrp: - - { get_resource: lb-secgrp } - - { get_resource: common-secgrp } - floating_network: {{ external_network }} net_name: str_replace: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: lb-secgrp } + - { get_resource: common-secgrp } + {% if not provider_network %} + floating_network: {{ external_network }} + {% endif %} volume_size: {{ lb_volume_size }} + {% if not provider_network %} depends_on: - interface + {% endif %} {% endif %} masters: @@ -589,8 +608,18 @@ resources: image: {{ openstack_master_image }} flavor: {{ master_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% endif %} secgrp: {% if openstack_flat_secgrp|default(False)|bool %} - { get_resource: flat-secgrp } @@ -602,17 +631,14 @@ resources: {% endif %} {% endif %} - { get_resource: common-secgrp } -{% if not use_bastion|bool %} +{% if not use_bastion|bool and not provider_network %} floating_network: {{ external_network }} {% endif %} - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} volume_size: {{ master_volume_size }} +{% if not provider_network %} depends_on: - interface +{% endif %} compute_nodes: type: OS::Heat::ResourceGroup @@ -650,22 +676,29 @@ resources: image: {{ openstack_node_image }} flavor: {{ node_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } - secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - - { get_resource: common-secgrp } -{% if not use_bastion|bool %} - floating_network: {{ external_network }} -{% endif %} net_name: str_replace: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + - { get_resource: common-secgrp } +{% if not use_bastion|bool and not provider_network %} + floating_network: {{ external_network }} +{% endif %} volume_size: {{ node_volume_size }} +{% if not provider_network %} depends_on: - interface +{% endif %} infra_nodes: type: OS::Heat::ResourceGroup @@ -697,8 +730,18 @@ resources: image: {{ openstack_infra_image }} flavor: {{ infra_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: 
{{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% endif %} secgrp: # TODO(bogdando) filter only required node rules into infra-secgrp {% if openstack_flat_secgrp|default(False)|bool %} @@ -711,15 +754,14 @@ resources: {% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } +{% if not provider_network %} floating_network: {{ external_network }} - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} +{% endif %} volume_size: {{ infra_volume_size }} +{% if not provider_network %} depends_on: - interface +{% endif %} {% if num_dns|int > 0 %} dns: @@ -747,18 +789,27 @@ resources: image: {{ openstack_dns_image }} flavor: {{ dns_flavor }} key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} net: { get_resource: net } subnet: { get_resource: subnet } - secgrp: - - { get_resource: dns-secgrp } - - { get_resource: common-secgrp } - floating_network: {{ external_network }} net_name: str_replace: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: dns-secgrp } + - { get_resource: common-secgrp } +{% if not provider_network %} + floating_network: {{ external_network }} +{% endif %} volume_size: {{ dns_volume_size }} +{% if not provider_network %} depends_on: - interface {% endif %} +{% endif %} diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index 32fb166f6..a520a8fe2 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -61,20 +61,24 @@ parameters: label: Net name description: Net name +{% if not provider_network %} subnet: type: string label: Subnet ID description: Subnet resource +{% endif %} secgrp: type: comma_delimited_list label: Security groups description: Security group resources +{% if not provider_network %} floating_network: type: string label: Floating network description: Network to allocate floating IP from +{% endif %} availability_zone: type: string @@ -117,7 +121,11 @@ outputs: - server - addresses - { get_param: net_name } +{% if provider_network %} + - 0 +{% else %} - 1 +{% endif %} - addr resources: @@ -147,15 +155,19 @@ resources: type: OS::Neutron::Port properties: network: { get_param: net } +{% if not provider_network %} fixed_ips: - subnet: { get_param: subnet } +{% endif %} security_groups: { get_param: secgrp } +{% if not provider_network %} floating-ip: type: OS::Neutron::FloatingIP properties: floating_network: { get_param: floating_network } port_id: { get_resource: port } +{% endif %} {% if not ephemeral_volumes|default(false)|bool %} cinder_volume: diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml index 75d0ee6d5..e36974d93 100644 --- a/roles/static_inventory/tasks/openstack.yml +++ b/roles/static_inventory/tasks/openstack.yml @@ -24,6 +24,15 @@ when: - refresh_inventory|bool + - name: set_fact for openstack inventory nodes with provider network + set_fact: + registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}" + vars: + q: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4=='']" + 
when: + - refresh_inventory|bool + - openstack_provider_network_name|default(None) + - name: Add cluster nodes w/o floating IPs to inventory with_items: "{{ registered_nodes|difference(registered_nodes_floating) }}" add_host: @@ -49,7 +58,14 @@ add_host: name: '{{ item.name }}' groups: '{{ item.metadata.group }}' - ansible_host: "{% if use_bastion|bool %}{{ item.name }}{% else %}{{ item.public_v4 }}{% endif %}" + ansible_host: >- + {% if use_bastion|bool -%} + {{ item.name }} + {%- elif openstack_provider_network_name|default(None) -%} + {{ item.private_v4 }} + {%- else -%} + {{ item.public_v4 }} + {%- endif %} ansible_fqdn: '{{ item.name }}' ansible_user: '{{ ssh_user }}' ansible_private_key_file: '{{ private_ssh_key }}' @@ -57,7 +73,12 @@ private_v4: >- {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} {{ node[0].addresses[openstack_private_network|quote][0].addr }} - public_v4: '{{ item.public_v4 }}' + public_v4: >- + {% if openstack_provider_network_name|default(None) -%} + {{ item.private_v4 }} + {%- else -%} + {{ item.public_v4 }} + {%- endif %} - name: Add bastion node to inventory add_host: -- cgit v1.2.3 -- cgit v1.2.3 From 97c99ad8582370803e2841b07985260886614eb2 Mon Sep 17 00:00:00 2001 From: tzumainn Date: Wed, 6 Sep 2017 09:36:09 -0400 Subject: Point openshift_master_cluster_public_hostname at master or lb if defined (#706) * Point openshift_master_cluster_public_hostname at master or load balancer if specified * cleanup * remove extraneous brackets * corrections * added doc section * add private records --- playbooks/provisioning/openstack/README.md | 9 +++++++++ roles/dns-records/tasks/main.yml | 28 ++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 4e74627dc..8b9a37537 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -295,6 +295,15 @@ variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`: deployment_type: origin openshift_deployment_type: "{{ deployment_type }}" +#### Setting a custom entrypoint + +In order to set a custom entrypoint, update `openshift_master_cluster_public_hostname` + + openshift_master_cluster_public_hostname: api.openshift.example.com + +Note than an empty hostname does not work, so if your domain is `openshift.example.com`, +you cannot set this value to simply `openshift.example.com`. + ### Configure static inventory and access via a bastion node Example inventory variables: diff --git a/roles/dns-records/tasks/main.yml b/roles/dns-records/tasks/main.yml index e9bce9718..305a55195 100644 --- a/roles/dns-records/tasks/main.yml +++ b/roles/dns-records/tasks/main.yml @@ -9,6 +9,20 @@ private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" with_items: "{{ groups['infra_hosts'] }}" +- name: "Add public master cluster hostname records to the private A records (single master)" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + +- name: "Add public master cluster hostname records to the private A records (multi-master)" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters > 1 + - name: "Set the private DNS server to use the external value (if provided)" set_fact: nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" @@ -50,6 +64,20 @@ with_items: "{{ groups['infra_hosts'] }}" when: hostvars[item]['public_v4'] is defined +- name: "Add public master cluster hostname records to the public A records (single master)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + +- name: "Add public master cluster hostname records to the public A records (multi-master)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters > 1 + - name: "Set the public DNS server details to use the external value (if provided)" set_fact: nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 1cf6275b983a108a02b6ef178fe35e610162b963 Mon Sep 17 00:00:00 2001 From: Antoni Segura Puimedon Date: Tue, 12 Sep 2017 10:57:38 +0200 Subject: openstack: make server ports be trunk ports (#713) This ensures that the ports that the servers were using before this commit will be parent ports of Neutron trunk ports. Thanks to this, there can be nested Neutron ports inside the OS::NOva::Server resources created either in the heat stack or dynamically inside the Instances. 
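With the change below, each server's Neutron port becomes the parent of an `OS::Neutron::Trunk` resource named after the node, so additional subports can be attached to a running instance later without touching the VM itself. Purely for illustration, and assuming the Neutron trunk extension is enabled, attaching a VLAN subport from the CLI looks roughly like this (port, network and trunk names are placeholders):

```
openstack port create --network private-net child-port-0
openstack network trunk set \
  --subport port=child-port-0,segmentation-type=vlan,segmentation-id=101 \
  master-0.openshift.example.com
openstack network trunk show master-0.openshift.example.com
```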
Signed-off-by: Antoni Segura Puimedon --- roles/openstack-stack/templates/heat_stack_server.yaml.j2 | 12 ++++++++++++ .../templates/heat_stack_server_nofloating.yaml.j2 | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index a520a8fe2..fc797941e 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -138,7 +138,11 @@ resources: image: { get_param: image } flavor: { get_param: flavor } networks: +{% if use_trunk_ports|default(false)|bool %} + - port: { get_attr: [trunk-port, port_id] } +{% else %} - port: { get_resource: port } +{% endif %} user_data: get_file: user-data user_data_format: RAW @@ -151,6 +155,14 @@ resources: sub-host-type: { get_param: subtype } node_labels: { get_param: node_labels } +{% if use_trunk_ports|default(false)|bool %} + trunk-port: + type: OS::Neutron::Trunk + properties: + name: { get_param: name } + port: { get_resource: port } +{% endif %} + port: type: OS::Neutron::Port properties: diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 index 638fc8b45..2c16ad778 100644 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -115,7 +115,11 @@ resources: image: { get_param: image } flavor: { get_param: flavor } networks: +{% if use_trunk_ports|default(false)|bool %} + - port: { get_attr: [trunk-port, port_id] } +{% else %} - port: { get_resource: port } +{% endif %} user_data: get_file: user-data user_data_format: RAW @@ -128,6 +132,14 @@ resources: sub-host-type: { get_param: subtype } node_labels: { get_param: node_labels } +{% if use_trunk_ports|default(false)|bool %} + trunk-port: + type: OS::Neutron::Trunk + properties: + name: { get_param: name } + port: { get_resource: port } +{% endif %} + port: type: OS::Neutron::Port properties: -- cgit v1.2.3 -- cgit v1.2.3 From afd6a03b071eced6bd0940bb96a2a39233739523 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Tue, 12 Sep 2017 17:05:56 +0200 Subject: Support Cinder-backed Openshift registry (#707) * Attach and detach a volume, wait for it to be accessible This is mostly just handling the attach/detach code, making sure the necessary vars are accessible where they need to be as well as finding out the correct device name the volume is attached as. * Create temp directory for mounts, remove some debug info * add the fs actions * Remove debug * Prepare the volume automatically if possible * Add docs and sample inventory * Read OS_* creds from shell in sample inventory * Fix yamlint complaint * Update readme This mentions the potential pitfalls when using devstack. 
* Better check for the router deployment in CI * Set the openshift_hoster*_wait vars to True * Fix typo --- playbooks/provisioning/openstack/README.md | 78 ++++++++++++++++++++++ .../openstack/post-provision-openstack.yml | 3 + .../prepare-and-format-cinder-volume.yaml | 75 +++++++++++++++++++++ .../sample-inventory/group_vars/OSEv3.yml | 20 ++++++ .../openstack/sample-inventory/group_vars/all.yml | 7 ++ 5 files changed, 183 insertions(+) create mode 100644 playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 8b9a37537..267176eec 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -295,6 +295,7 @@ variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`: deployment_type: origin openshift_deployment_type: "{{ deployment_type }}" + #### Setting a custom entrypoint In order to set a custom entrypoint, update `openshift_master_cluster_public_hostname` @@ -304,6 +305,83 @@ In order to set a custom entrypoint, update `openshift_master_cluster_public_hos Note than an empty hostname does not work, so if your domain is `openshift.example.com`, you cannot set this value to simply `openshift.example.com`. +### Use an existing Cinder volume for the OpenShift registry + +You can optionally use an existing Cinder volume for the storage of +your OpenShift registry. + +To do that, you need to have a Cinder volume (you can create one by +running: + + openstack volume create --size + +The volume needs to have a file system created before you put it to +use. We can do prepare it for you if you put this in inventory/group_vars/all.yml: + + prepare_and_format_registry_volume: true + +**NOTE:** doing so **will destroy any data that's currently on the volume**! + +You can also run the registry setup playbook directly: + + ansible-playbook -i inventory playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml + +(the provisioning phase must be completed, first) + + +To instruct OpenShift to actually use the volume, you must first configure it +with the OpenStack credentials by putting the following to `OSEv3.yml`: + + ## Openstack credentials + #openshift_cloudprovider_kind=openstack + #openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ + #openshift_cloudprovider_openstack_username=username + #openshift_cloudprovider_openstack_password=password + #openshift_cloudprovider_openstack_domain_id=domain_id + #openshift_cloudprovider_openstack_domain_name=domain_name + #openshift_cloudprovider_openstack_tenant_id=tenant_id + #openshift_cloudprovider_openstack_tenant_name=tenant_name + #openshift_cloudprovider_openstack_region=region + +Note that these credentials may be different from the ones you used for +provisioning (say for quota or access control reasons). To use the same +OpenStack credentials for both, take a look at the `sample-inventory`. It shows +how to read the values from your shell environment. + +Make sure to only set the values you need from (e.g. your keystonerc or +clouds.yaml). Some of the options ar keystone V2 or V3 specific. + +**NOTE**: If you're testing this on (DevStack)[devstack], you must +explicitly set your Keystone API version to v2 (e.g. +`OS_AUTH_URL=http://10.20.30.40/identity/v2.0`) instead of the default +value provided by `openrc`. 
You may also encounter the following issue +with Cinder: + +https://github.com/kubernetes/kubernetes/issues/50461 + + +[devstack]: https://docs.openstack.org/devstack/latest/ + + +You can read the (OpenShift documentation on configuring +OpenStack)[openstack] for more information. + +[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html + + +Next we need to instruct openshift-ansible to use the Cinder volume +for it's registry. Again in `OSEv3.yml`: + + ## Use Cinder volume for Openshift registry: + #openshift_hosted_registry_storage_kind: openstack + #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] + #openshift_hosted_registry_storage_openstack_filesystem: xfs + #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05 + #openshift_hosted_registry_storage_volume_size: 10Gi + +The **Cinder volume ID**, **filesystem** and **volume size** variables must +correspond to the values in your volume. + ### Configure static inventory and access via a bastion node Example inventory variables: diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index 28f3e5fcf..116eb1244 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -84,3 +84,6 @@ line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" roles: - node-network-manager + +- include: prepare-and-format-cinder-volume.yaml + when: prepare_and_format_registry_volume|default(False) diff --git a/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml b/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml new file mode 100644 index 000000000..2d630f79d --- /dev/null +++ b/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml @@ -0,0 +1,75 @@ +--- +- hosts: localhost + gather_facts: False + become: False + tasks: + - set_fact: + cinder_volume: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_volumeID }}" + cinder_fs: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_filesystem }}" + + - name: Attach the volume to the VM + os_server_volume: + state: present + server: "{{ groups['masters'][0] }}" + volume: "{{ cinder_volume }}" + register: volume_attachment + + - set_fact: + attached_device: >- + {{ volume_attachment['attachments']|json_query("[?volume_id=='" + cinder_volume + "'].device | [0]") }} + + +- hosts: masters[0] + gather_facts: False + become: True + tasks: + - name: Wait for the device to appear + wait_for: path={{ hostvars['localhost'].attached_device }} + + - name: Create a temp directory for mounting the volume + tempfile: + prefix: cinder-volume + state: directory + register: cinder_mount_dir + + - name: Format the device + filesystem: + fstype: "{{ openshift_hosted_registry_storage_openstack_filesystem }}" + dev: "{{ hostvars['localhost'].attached_device }}" + + - name: Mount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ hostvars['localhost'].attached_device }}" + state: mounted + fstype: "{{ openshift_hosted_registry_storage_openstack_filesystem }}" + + - name: Change mode on the filesystem + file: + path: "{{ cinder_mount_dir.path }}" + state: directory + recurse: true + mode: 0777 + + - name: Unmount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ hostvars['localhost'].attached_device }}" + state: absent + fstype: 
"{{ openshift_hosted_registry_storage_openstack_filesystem }}" + + - name: Delete the temp directory + file: + name: "{{ cinder_mount_dir.path }}" + state: absent + + +- hosts: localhost + gather_facts: False + become: False + tasks: + - name: Detach the volume from the VM + os_server_volume: + state: absent + server: "{{ groups['masters'][0] }}" + volume: "{{ cinder_volume }}" diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 4d27ae873..874ea7126 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -10,6 +10,26 @@ openshift_master_cluster_public_hostname: "{{ groups.lb.0|default(groups.masters osm_default_node_selector: 'region=primary' +openshift_hosted_router_wait: True +openshift_hosted_registry_wait: True + +## Openstack credentials +#openshift_cloudprovider_kind=openstack +#openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" +#openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}" +#openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" +#openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" +#openshift_cloudprovider_openstack_region="{{ lookup('env', 'OS_REGION_NAME') }}" + + +## Use Cinder volume for Openshift registry: +#openshift_hosted_registry_storage_kind: openstack +#openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] +#openshift_hosted_registry_storage_openstack_filesystem: xfs +#openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05 +#openshift_hosted_registry_storage_volume_size: 10Gi + + # NOTE(shadower): the hostname check seems to always fail because the # host's floating IP address doesn't match the address received from # inside the host. diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 0e198342c..2e73d2e26 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -62,6 +62,13 @@ openstack_default_flavor: "m1.medium" #docker_lb_volume_size: "5" docker_volume_size: "15" +## Set up a filesystem on the cinder volume specified in `OSEv3.yaml`. +## You need to specify the file system and volume ID in OSEv3 via +## `openshift_hosted_registry_storage_openstack_filesystem` and +## `openshift_hosted_registry_storage_openstack_volumeID`. +## WARNING: This will delete any data on the volume! 
+#prepare_and_format_registry_volume: False + openstack_subnet_prefix: "192.168.99" # # Red Hat subscription -- cgit v1.2.3 From 074b3e526123da7a62c3d939859389c5f2a357b2 Mon Sep 17 00:00:00 2001 From: Eric Sauer Date: Tue, 12 Sep 2017 11:54:35 -0400 Subject: Add ability to support custom api and console ports (#712) * Add ability to support custom api and console ports * Missed an ingress rule --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index b6b5e3613..1abc67207 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -193,8 +193,12 @@ resources: port_range_max: 4001 - direction: ingress protocol: tcp - port_range_min: 8443 - port_range_max: 8444 + port_range_min: {{ openshift_master_api_port|default(8443) }} + port_range_max: {{ openshift_master_api_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port|default(8443) }} + port_range_max: {{ openshift_master_console_port|default(8443) }} - direction: ingress protocol: tcp port_range_min: 8053 @@ -284,8 +288,12 @@ resources: port_range_max: 4001 - direction: ingress protocol: tcp - port_range_min: 8443 - port_range_max: 8444 + port_range_min: {{ openshift_master_api_port|default(8443) }} + port_range_max: {{ openshift_master_api_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port|default(8443) }} + port_range_max: {{ openshift_master_console_port|default(8443) }} - direction: ingress protocol: tcp port_range_min: 8053 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From b6dd8f112cd5506923b4b3ce51a1774b0bfc037c Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 11 Sep 2017 14:57:09 +0200 Subject: Pre-create a Cinder registry volume --- playbooks/provisioning/openstack/README.md | 117 ++++++++++++--------- .../openstack/post-provision-openstack.yml | 5 +- .../prepare-and-format-cinder-volume.yaml | 78 ++++++-------- .../provisioning/openstack/provision-openstack.yml | 4 + .../sample-inventory/group_vars/OSEv3.yml | 4 + .../openstack/sample-inventory/group_vars/all.yml | 6 ++ .../tasks/main.yaml | 5 + roles/static_inventory/templates/inventory.j2 | 8 +- 8 files changed, 135 insertions(+), 92 deletions(-) create mode 100644 roles/openstack-create-cinder-registry/tasks/main.yaml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 267176eec..ab1513a73 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -305,82 +305,105 @@ In order to set a custom entrypoint, update `openshift_master_cluster_public_hos Note than an empty hostname does not work, so if your domain is `openshift.example.com`, you cannot set this value to simply `openshift.example.com`. -### Use an existing Cinder volume for the OpenShift registry +### Creating and using a Cinder volume for the OpenShift registry -You can optionally use an existing Cinder volume for the storage of -your OpenShift registry. +You can optionally have the playbooks create a Cinder volume and set +it up as the OpenShift hosted registry. 
-To do that, you need to have a Cinder volume (you can create one by -running: +To do that you need specify the desired Cinder volume name and size in +Gigabytes in `inventory/group_vars/all.yml`: - openstack volume create --size + cinder_hosted_registry_name: cinder-registry + cinder_hosted_registry_size_gb: 10 -The volume needs to have a file system created before you put it to -use. We can do prepare it for you if you put this in inventory/group_vars/all.yml: +With this, the playbooks will create the volume and set up its +filesystem. If there is an existing volume of the same name, we will +use it but keep the existing data on it. - prepare_and_format_registry_volume: true - -**NOTE:** doing so **will destroy any data that's currently on the volume**! - -You can also run the registry setup playbook directly: - - ansible-playbook -i inventory playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml - -(the provisioning phase must be completed, first) +To use the volume for the registry, you must first configure it with +the OpenStack credentials by putting the following to `OSEv3.yml`: + openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}" + openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" + openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" + openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" -To instruct OpenShift to actually use the volume, you must first configure it -with the OpenStack credentials by putting the following to `OSEv3.yml`: - - ## Openstack credentials - #openshift_cloudprovider_kind=openstack - #openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ - #openshift_cloudprovider_openstack_username=username - #openshift_cloudprovider_openstack_password=password - #openshift_cloudprovider_openstack_domain_id=domain_id - #openshift_cloudprovider_openstack_domain_name=domain_name - #openshift_cloudprovider_openstack_tenant_id=tenant_id - #openshift_cloudprovider_openstack_tenant_name=tenant_name - #openshift_cloudprovider_openstack_region=region - -Note that these credentials may be different from the ones you used for -provisioning (say for quota or access control reasons). To use the same -OpenStack credentials for both, take a look at the `sample-inventory`. It shows -how to read the values from your shell environment. - -Make sure to only set the values you need from (e.g. your keystonerc or -clouds.yaml). Some of the options ar keystone V2 or V3 specific. +This will use the credentials from your shell environment. If you want +to enter them explicitly, you can. You can also use credentials +different from the provisioning ones (say for quota or access control +reasons). **NOTE**: If you're testing this on (DevStack)[devstack], you must explicitly set your Keystone API version to v2 (e.g. -`OS_AUTH_URL=http://10.20.30.40/identity/v2.0`) instead of the default +`OS_AUTH_URL=http://10.34.37.47/identity/v2.0`) instead of the default value provided by `openrc`. You may also encounter the following issue with Cinder: https://github.com/kubernetes/kubernetes/issues/50461 +You can read the (OpenShift documentation on configuring +OpenStack)[openstack] for more information. 
[devstack]: https://docs.openstack.org/devstack/latest/ +[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html -You can read the (OpenShift documentation on configuring -OpenStack)[openstack] for more information. +Next, we need to instruct OpenShift to use the Cinder volume for it's +registry. Again in `OSEv3.yml`: -[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html + #openshift_hosted_registry_storage_kind: openstack + #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] + #openshift_hosted_registry_storage_openstack_filesystem: xfs +The filesystem value here will be used in the initial formatting of +the volume. -Next we need to instruct openshift-ansible to use the Cinder volume -for it's registry. Again in `OSEv3.yml`: - ## Use Cinder volume for Openshift registry: +### Use an existing Cinder volume for the OpenShift registry + +You can also use a pre-existing Cinder volume for the storage of your +OpenShift registry. + +To do that, you need to have a Cinder volume. You can create one by +running: + + openstack volume create --size + +The volume needs to have a file system created before you put it to +use. + +As with the automatically-created volume, you have to set up the +OpenStack credentials in `inventory/group_vars/OSEv3.yml` as well as +registry values: + #openshift_hosted_registry_storage_kind: openstack #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] #openshift_hosted_registry_storage_openstack_filesystem: xfs #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05 #openshift_hosted_registry_storage_volume_size: 10Gi -The **Cinder volume ID**, **filesystem** and **volume size** variables must -correspond to the values in your volume. +Note the `openshift_hosted_registry_storage_openstack_volumeID` and +`openshift_hosted_registry_storage_volume_size` values: these need to +be added in addition to the previous variables. + +The **Cinder volume ID**, **filesystem** and **volume size** variables +must correspond to the values in your volume. The volume ID must be +the **UUID** of the Cinder volume, *not its name*. + +We can do formate the volume for you if you ask for it in +`inventory/group_vars/all.yml`: + + prepare_and_format_registry_volume: true + +**NOTE:** doing so **will destroy any data that's currently on the volume**! 
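For reference, here is a rough sketch of the manual path described above: create the volume and look up its UUID, which is the value to put into `openshift_hosted_registry_storage_openstack_volumeID`. The volume name `registry-volume` and the 10 GB size are only examples, and this assumes the `openstack` CLI is configured with your credentials:

    openstack volume create --size 10 registry-volume
    openstack volume show registry-volume -f value -c id

The second command prints the volume's UUID rather than its name, which is what the registry variables expect.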
+ +You can also run the registry setup playbook directly: + + ansible-playbook -i inventory playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml + +(the provisioning phase must be completed, first) + + ### Configure static inventory and access via a bastion node diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index 116eb1244..61f950c14 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -86,4 +86,7 @@ - node-network-manager - include: prepare-and-format-cinder-volume.yaml - when: prepare_and_format_registry_volume|default(False) + when: > + prepare_and_format_registry_volume|default(False) or + (cinder_registry_volume is defined and + cinder_registry_volume.changed|default(False)) diff --git a/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml b/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml index 2d630f79d..30e094459 100644 --- a/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml +++ b/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml @@ -18,56 +18,48 @@ attached_device: >- {{ volume_attachment['attachments']|json_query("[?volume_id=='" + cinder_volume + "'].device | [0]") }} + - delegate_to: "{{ groups['masters'][0] }}" + block: + - name: Wait for the device to appear + wait_for: path={{ attached_device }} -- hosts: masters[0] - gather_facts: False - become: True - tasks: - - name: Wait for the device to appear - wait_for: path={{ hostvars['localhost'].attached_device }} - - - name: Create a temp directory for mounting the volume - tempfile: - prefix: cinder-volume - state: directory - register: cinder_mount_dir + - name: Create a temp directory for mounting the volume + tempfile: + prefix: cinder-volume + state: directory + register: cinder_mount_dir - - name: Format the device - filesystem: - fstype: "{{ openshift_hosted_registry_storage_openstack_filesystem }}" - dev: "{{ hostvars['localhost'].attached_device }}" + - name: Format the device + filesystem: + fstype: "{{ cinder_fs }}" + dev: "{{ attached_device }}" - - name: Mount the device - mount: - name: "{{ cinder_mount_dir.path }}" - src: "{{ hostvars['localhost'].attached_device }}" - state: mounted - fstype: "{{ openshift_hosted_registry_storage_openstack_filesystem }}" + - name: Mount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ attached_device }}" + state: mounted + fstype: "{{ cinder_fs }}" - - name: Change mode on the filesystem - file: - path: "{{ cinder_mount_dir.path }}" - state: directory - recurse: true - mode: 0777 - - - name: Unmount the device - mount: - name: "{{ cinder_mount_dir.path }}" - src: "{{ hostvars['localhost'].attached_device }}" - state: absent - fstype: "{{ openshift_hosted_registry_storage_openstack_filesystem }}" + - name: Change mode on the filesystem + file: + path: "{{ cinder_mount_dir.path }}" + state: directory + recurse: true + mode: 0777 - - name: Delete the temp directory - file: - name: "{{ cinder_mount_dir.path }}" - state: absent + - name: Unmount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ attached_device }}" + state: absent + fstype: "{{ cinder_fs }}" + - name: Delete the temp directory + file: + name: "{{ cinder_mount_dir.path }}" + state: absent -- hosts: localhost - gather_facts: False - become: False - tasks: - name: Detach the volume from the VM 
os_server_volume: state: absent diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index 6ec944d56..e4705bd2c 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -8,6 +8,10 @@ - include: pre_tasks.yml roles: - role: openstack-stack + - role: openstack-create-cinder-registry + when: + - cinder_hosted_registry_name is defined + - cinder_hosted_registry_size_gb is defined - role: static_inventory when: openstack_inventory|default('static') == 'static' inventory_path: "{{ openstack_inventory_path|default(inventory_dir) }}" diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 874ea7126..7d7683c62 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -26,6 +26,10 @@ openshift_hosted_registry_wait: True #openshift_hosted_registry_storage_kind: openstack #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] #openshift_hosted_registry_storage_openstack_filesystem: xfs + +## Configure this if you're attaching a Cinder volume you've set up. +## If you're using the `cinder_hosted_registry_name` option from +## `all.yml`, this will be configured automaticaly. #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05 #openshift_hosted_registry_storage_volume_size: 10Gi diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 2e73d2e26..bc186a6b8 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -62,6 +62,12 @@ openstack_default_flavor: "m1.medium" #docker_lb_volume_size: "5" docker_volume_size: "15" + +## Create a Cinder volume and use it for the OpenShift registry. +## NOTE: the openstack credentials and hosted registry options must be set in OSEv3.yml! +#cinder_hosted_registry_name: cinder-registry +#cinder_hosted_registry_size_gb: 10 + ## Set up a filesystem on the cinder volume specified in `OSEv3.yaml`. 
## You need to specify the file system and volume ID in OSEv3 via ## `openshift_hosted_registry_storage_openstack_filesystem` and diff --git a/roles/openstack-create-cinder-registry/tasks/main.yaml b/roles/openstack-create-cinder-registry/tasks/main.yaml new file mode 100644 index 000000000..6e9d1c2e7 --- /dev/null +++ b/roles/openstack-create-cinder-registry/tasks/main.yaml @@ -0,0 +1,5 @@ +--- +- os_volume: + display_name: "{{ cinder_hosted_registry_name }}" + size: "{{ cinder_hosted_registry_size_gb }}" + register: cinder_registry_volume diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 987c98ec6..640a46ba2 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -42,10 +42,16 @@ etcd lb # Set variables common for all OSEv3 hosts -#[OSEv3:vars] +[OSEv3:vars] # For OSEv3 normal group vars, see ./group_vars/OSEv3.yml +{% if cinder_registry_volume is defined %} +openshift_hosted_registry_storage_openstack_volumeID="{{ cinder_registry_volume.id }}" +openshift_hosted_registry_storage_volume_size="{{ cinder_registry_volume.volume.size }}Gi" +{% endif %} + + # Host Groups [masters:children] -- cgit v1.2.3 From d2be3821ae085ec7faa2091df7abaf7279a983e3 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 13 Sep 2017 14:03:27 +0200 Subject: Fix the cinder_registry_volume conditional Deployments without the cinder registry would fail, because the `cinder_registry_volume` variable is still set even when we don't actually create the volume. --- roles/static_inventory/templates/inventory.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 640a46ba2..2245963c0 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -46,7 +46,7 @@ lb # For OSEv3 normal group vars, see ./group_vars/OSEv3.yml -{% if cinder_registry_volume is defined %} +{% if cinder_registry_volume is defined and 'volume' in cinder_registry_volume %} openshift_hosted_registry_storage_openstack_volumeID="{{ cinder_registry_volume.id }}" openshift_hosted_registry_storage_volume_size="{{ cinder_registry_volume.volume.size }}Gi" {% endif %} -- cgit v1.2.3 From 8a204aaec709135ebfa716459f2ba3bcf1db4f04 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 13 Sep 2017 17:44:45 +0200 Subject: Clear the previous inventory during provisioning If there was a left-over inventory from a previous run that had nodes which were subsequently removed, these would still show up in the Ansible's in-memory inventory and Ansible would fail trying to connect to them. This is because Ansible automatically loads the `inventory/hosts` file if it exists and even if we overwrite it later, every node and group still remains in the memory. By removing the inventory file and and calling the `refresh_inventory` meta task, we make sure that any left-over values are removed. 
--- roles/static_inventory/tasks/main.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml index 24e11beb6..3dab62df2 100644 --- a/roles/static_inventory/tasks/main.yml +++ b/roles/static_inventory/tasks/main.yml @@ -1,4 +1,12 @@ --- +- name: Remove any existing inventory + file: + path: "{{ inventory_path }}/hosts" + state: absent + +- name: Refresh the inventory + meta: refresh_inventory + - name: Generate in-memory inventory include: openstack.yml -- cgit v1.2.3 From 2d5704d7927a73aaeb6af1fa0a14427e766fd1e3 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 14 Sep 2017 08:57:57 +0200 Subject: Make the `rhsm_register` value optional This was a regression -- it used to be optional (defaulting to False), but among some changes we ended up requiring it again. --- playbooks/provisioning/openstack/pre-install.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/pre-install.yml b/playbooks/provisioning/openstack/pre-install.yml index 9b49136da..45e9005cc 100644 --- a/playbooks/provisioning/openstack/pre-install.yml +++ b/playbooks/provisioning/openstack/pre-install.yml @@ -9,7 +9,7 @@ - hosts: OSEv3 become: true roles: - - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager', ansible_sudo: true } + - { role: subscription-manager, when: hostvars.localhost.rhsm_register|default(False), tags: 'subscription-manager', ansible_sudo: true } - { role: docker, tags: 'docker' } - { role: openshift-prep, tags: 'openshift-prep' } -- cgit v1.2.3 From 5fe8f8cd89da4312f8b8465cde44c01b1db3a1da Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 14 Sep 2017 09:02:04 +0200 Subject: Remove the `rhsm_register` value from inventory It is now commented out since it's no longer necessary. --- playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index bc186a6b8..12f64f401 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -77,8 +77,9 @@ docker_volume_size: "15" openstack_subnet_prefix: "192.168.99" -# # Red Hat subscription -rhsm_register: False +## Red Hat subscription defaults to false which means we will not attempt to +## subscribe the nodes +#rhsm_register: False # # Using Red Hat Satellite: #rhsm_register: True -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 288fef2dd2d74baab729d7c8b628a32d337da9bc Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Tue, 19 Sep 2017 16:36:57 +0200 Subject: Empty ssh (#729) * Make `openstack_private_ssh_key` optional Before this, the deployer could not reasonably rely on their own SSH configuration or e.g. using the `--private-key` option to ansible-playbook because we always wrote the `ansible_private_key_file` value in the static inventory. This change makes the `openstack_private_ssh_key` variable truly optional: if it's not set, the static inventory will not configure the SSH key and will just rely on the existing configuration. * Update the openstack e2e CI It no longer sets the SSH keys explicitly -- which should just work with the previous commit. 
* Put back the `openstack_ssh_public_key` in CI This is the option we actually need to keep. This sholud fix the CI failures. --- playbooks/provisioning/openstack/provision-openstack.yml | 2 +- roles/static_inventory/templates/inventory.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml index e4705bd2c..bf424676d 100644 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ b/playbooks/provisioning/openstack/provision-openstack.yml @@ -15,7 +15,7 @@ - role: static_inventory when: openstack_inventory|default('static') == 'static' inventory_path: "{{ openstack_inventory_path|default(inventory_dir) }}" - private_ssh_key: "{{ openstack_private_ssh_key|default('~/.ssh/id_rsa') }}" + private_ssh_key: "{{ openstack_private_ssh_key|default('') }}" ssh_config_path: "{{ openstack_ssh_config_path|default('/tmp/ssh.config.openshift.ansible' + '.' + stack_name) }}" ssh_user: "{{ ansible_user }}" diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 2245963c0..8863fb7c4 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -12,7 +12,7 @@ %} public_v4={{ hostvars[host]['public_v4'] }}{% endif %} {% if 'ansible_user' in hostvars[host] %} ansible_user={{ hostvars[host]['ansible_user'] }}{% endif %} -{% if 'ansible_private_key_file' in hostvars[host] +{% if 'ansible_private_key_file' in hostvars[host] and hostvars[host]['ansible_private_key_file'] %} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %} {% if use_bastion|bool and 'ansible_ssh_extra_args' in hostvars[host] %} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} openshift_hostname={{ host }} -- cgit v1.2.3 From 957a3130d586f7da8cd2643dce3de059649bcdbf Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Tue, 19 Sep 2017 17:35:45 +0200 Subject: Docker ansible host (#742) * Document using a Docker image for Ansible host * Fix the markdown url syntax * Mention keystonerc as well --- playbooks/provisioning/openstack/README.md | 32 ++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index ab1513a73..c6633df06 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -14,6 +14,9 @@ etc.). The result is an environment ready for openshift-ansible. * python-dns / [dnspython](https://pypi.python.org/pypi/dnspython) * Become (sudo) is not required. +**NOTE**: You can use a Docker image with all dependencies set up. +Find more in the [Deployment section](#deployment). + ### Optional Dependencies for localhost **Note**: When using rhel images, `rhel-7-server-openstack-10-rpms` repository is required in order to install these packages. @@ -444,6 +447,35 @@ the dynamic inventory file in your ansible commands , like `-i openstack.py`. ## Deployment +### Using Docker on the Ansible host + +If you don't want to worry about the dependencies, you can use the +[OpenStack Control Host image][control-host-image]. + +[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/ + +It has all the dependencies installed, but you'll need to map your +code and credentials to it. 
Assuming your SSH keys live in `~/.ssh` +and everything else is in your current directory (i.e. `ansible.cfg`, +`keystonerc`, `inventory`, `openshift-ansible`, +`openshift-ansible-contrib`), this is how you run the deployment: + + sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \ + -v $PWD:/root/openshift:Z \ + -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \ + redhatcop/control-host-openstack bash + +(feel free to replace `$PWD` with an actual path to your inventory and +checkouts, but note that relative paths don't work) + +The first run may take a few minutes while the image is being +downloaded. After that, you'll be inside the container and you can run +the playbooks: + + cd openshift + ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml + + ### Run the playbook Assuming your OpenStack (Keystone) credentials are in the `keystonerc` -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 0cded5e5474a11ef5ae9c19f7e9141b6bacf14e8 Mon Sep 17 00:00:00 2001 From: tzumainn Date: Thu, 21 Sep 2017 11:30:10 -0400 Subject: load balancer formatting fix (#745) --- roles/openstack-stack/templates/heat_stack.yaml.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 1abc67207..7acef5a6b 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -577,14 +577,14 @@ resources: secgrp: - { get_resource: lb-secgrp } - { get_resource: common-secgrp } - {% if not provider_network %} +{% if not provider_network %} floating_network: {{ external_network }} - {% endif %} +{% endif %} volume_size: {{ lb_volume_size }} - {% if not provider_network %} +{% if not provider_network %} depends_on: - interface - {% endif %} +{% endif %} {% endif %} masters: -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From d361dc4b307781ec2bb5978f30516f266a34188c Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Tue, 26 Sep 2017 13:39:55 +0200 Subject: Upscaling OpenShift application nodes (#571) * scale-up: playbook for upscaling app nodes * scale-up: removed debug * scale-up: made suggested changes * scale-up: indentation fix * upscaling: process split into two playbooks that are executed by a bash script - upscaling_run.sh: bash script, usage displayed using -h parameter - upscaling_pre-tasks: check that new value is higher, change inventory variable - upscaling_scale-up: rerun provisioning and installation, verify change * upscaling_run: fixed openshift-ansible-contrib directory name * upscaling_run: inventory can be entered as relative path * upscaling_scale-up: fixed formatting * upscaling: minor changes * upscaling: moved to .../provisioning/openstack directory, README updated, minor changes made * README: minor changes * README: formatting * uspcaling: minor fix * upscaling: fix * upscaling: added customisations, fixes - openshift-ansible-contrib and openshift-ansible paths are customisable - fixed implicit incrementation by 1 * upscaling: fixes * upscaling: fixes * upscaling: another fix * upscaling: another fix * upscaling: fix * upscaling: back to a single playbook, README updated * minor fix * pre_tasks: added labels for autoscaling * scale-up: fixes * scale-up: fixed host variables, post-verification is only based on labels * scale-up: added openshift-ansible path customisation - path has to be absolute, cannot contain '/' at the end * scale-up: fix * scale-up: 
debug removed * README: added docs on openshift_ansible_dir, note about bastion * static_inventory: newly added nodes are added to new_nodes group - note: re-running provisioning fails when trying to install docker * removing new line * scale-up: running byo/config.yml or scaleup.yml based on the situation - (whether there is an existing deployment or not) * openstack.yml: indentation fix * added refresh inventory * upscaling: new_nodes only contains new does, it is not used during the first deployment * static_inventory: make sure that new nodes end up only in their new_nodes group * bug fixes * another fix * fixed condition * scale-up, static_inventory role: all app node data gathered before provisioning * upscaling: bug fixes * upscaling: another fixes * fixes * upscaling: fix * upscaling: fix * upscaling: another logic fix * bug fix for non-scaling deployments --- playbooks/provisioning/openstack/README.md | 21 ++++++ playbooks/provisioning/openstack/pre_tasks.yml | 4 ++ playbooks/provisioning/openstack/scale-up.yaml | 75 ++++++++++++++++++++++ .../tasks/filter_out_new_app_nodes.yaml | 15 +++++ roles/static_inventory/tasks/openstack.yml | 26 +++++++- roles/static_inventory/templates/inventory.j2 | 4 ++ 6 files changed, 143 insertions(+), 2 deletions(-) create mode 100644 playbooks/provisioning/openstack/scale-up.yaml create mode 100644 roles/static_inventory/tasks/filter_out_new_app_nodes.yaml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index c6633df06..5e45add51 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -568,6 +568,27 @@ In order to access UI, the ssh-tunnel service will be created and started on the control node. Make sure to remove these changes and the service manually, when not needed anymore. +## Scale Deployment up/down + +### Scaling up + +One can scale up the number of application nodes by executing the ansible playbook +`openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml`. +This process can be done even if there is currently no deployment available. +The `increment_by` variable is used to specify by how much the deployment should +be scaled up (if none exists, it serves as a target number of application nodes). +The path to `openshift-ansible` directory can be customised by the `openshift_ansible_dir` +variable. Its value must be an absolute path to `openshift-ansible` and it cannot +contain the '/' symbol at the end. + +Usage: + +``` +ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=] [-e openshift_ansible_dir=] +``` + +Note: This playbook works only without a bastion node (`openstack_use_bastion: False`). 
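As a concrete illustration of the usage line above (the inventory path, the node count and the checkout location are only examples):

    ansible-playbook -i inventory \
      openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml \
      -e increment_by=2 \
      -e openshift_ansible_dir=/home/cloud-user/openshift-ansible

With an existing deployment this adds two application nodes; with no existing deployment it provisions two application nodes in total.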
+ ## License As the rest of the openshift-ansible-contrib repository, the code here is diff --git a/playbooks/provisioning/openstack/pre_tasks.yml b/playbooks/provisioning/openstack/pre_tasks.yml index be29dad16..7146c886a 100644 --- a/playbooks/provisioning/openstack/pre_tasks.yml +++ b/playbooks/provisioning/openstack/pre_tasks.yml @@ -47,3 +47,7 @@ - name: Set openshift_cluster_node_labels for the app group set_fact: openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'region': 'primary'}}, recursive=True) }}" + +- name: Set openshift_cluster_node_labels for auto-scaling app nodes + set_fact: + openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'autoscaling': 'app'}}, recursive=True) }}" diff --git a/playbooks/provisioning/openstack/scale-up.yaml b/playbooks/provisioning/openstack/scale-up.yaml new file mode 100644 index 000000000..79fc09050 --- /dev/null +++ b/playbooks/provisioning/openstack/scale-up.yaml @@ -0,0 +1,75 @@ +--- +# Get the needed information about the current deployment +- hosts: masters[0] + tasks: + - name: Get number of app nodes + shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l + register: oc_old_num_nodes + - name: Get names of app nodes + shell: oc get nodes -l autoscaling=app --no-headers=true | cut -f1 -d " " + register: oc_old_app_nodes + +- hosts: localhost + tasks: + # Since both number and names of app nodes are to be removed + # localhost variables for these values need to be set + - name: Store old number and names of app nodes locally (if there is an existing deployment) + when: '"masters" in groups' + register: set_fact_result + set_fact: + oc_old_num_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_num_nodes'].stdout }}" + oc_old_app_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_app_nodes'].stdout_lines }}" + + - name: Set default values for old app nodes (if there is no existing deployment) + when: 'set_fact_result | skipped' + set_fact: + oc_old_num_nodes: 0 + oc_old_app_nodes: [] + + # Set how many nodes are to be added (1 by default) + - name: Set how many nodes are to be added + set_fact: + increment_by: 1 + - name: Check that the number corresponds to scaling up (not down) + assert: + that: 'increment_by | int >= 1' + msg: > + FAIL: The value of increment_by must be at least 1 + (but it is {{ increment_by | int }}). 
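  # Worked example: with 3 existing app nodes and `-e increment_by=2`, the next
  # task sets openstack_num_nodes to 5; provision.yaml then grows the stack and
  # the post-verification at the end of this playbook expects
  # `oc get nodes -l autoscaling=app` to report 5 nodes.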
+ - name: Update openstack_num_nodes variable + set_fact: + openstack_num_nodes: "{{ oc_old_num_nodes | int + increment_by | int }}" + +# Run provision.yaml with higher number of nodes to create a new app-node VM +- include: provision.yaml + +# Run config.yml to perform openshift installation +# Path to openshift-ansible can be customised: +# - the value of openshift_ansible_dir has to be an absolute path +# - the path cannot contain the '/' symbol at the end + +# Creating a new deployment by the full installation +- include: "{{ openshift_ansible_dir }}/playbooks/byo/config.yml" + vars: + openshift_ansible_dir: ../../../../openshift-ansible + when: 'not groups["new_nodes"] | list' + +# Scaling up existing deployment +- include: "{{ openshift_ansible_dir }}/playbooks/byo/openshift-node/scaleup.yml" + vars: + openshift_ansible_dir: ../../../../openshift-ansible + when: 'groups["new_nodes"] | list' + +# Post-verification: Verify new number of nodes +- hosts: masters[0] + tasks: + - name: Get number of nodes + shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l + register: oc_new_num_nodes + - name: Check that the actual result matches the defined value + assert: + that: 'oc_new_num_nodes.stdout | int == (hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int)' + msg: > + FAIL: Number of application nodes has not been increased accordingly + (it should be {{ hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int }} + but it is {{ oc_new_num_nodes.stdout | int }}). diff --git a/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml b/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml new file mode 100644 index 000000000..826efe78d --- /dev/null +++ b/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml @@ -0,0 +1,15 @@ +--- +- name: Add all new app nodes to new_app_nodes + when: + - 'oc_old_app_nodes is defined' + - 'oc_old_app_nodes | list' + - 'node.name not in oc_old_app_nodes' + - 'node["metadata"]["sub-host-type"] == "app"' + register: result + set_fact: + new_app_nodes: '{{ new_app_nodes }} + [ {{ node }} ]' + +- name: If the node was added to new_nodes, remove it from registered nodes + set_fact: + registered_nodes: '{{ registered_nodes | difference([ node ]) }}' + when: 'not result | skipped' diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml index e36974d93..adf78c966 100644 --- a/roles/static_inventory/tasks/openstack.yml +++ b/roles/static_inventory/tasks/openstack.yml @@ -37,7 +37,6 @@ with_items: "{{ registered_nodes|difference(registered_nodes_floating) }}" add_host: name: '{{ item.name }}' - groups: '{{ item.metadata.group }}' ansible_host: >- {% if use_bastion|bool -%} {{ item.name }} @@ -57,7 +56,6 @@ with_items: "{{ registered_nodes_floating }}" add_host: name: '{{ item.name }}' - groups: '{{ item.metadata.group }}' ansible_host: >- {% if use_bastion|bool -%} {{ item.name }} @@ -80,6 +78,30 @@ {{ item.public_v4 }} {%- endif %} + # Split registered_nodes into old nodes and new app nodes + # Add new app nodes to new_nodes host group for upscaling + - name: Create new_app_nodes variable + set_fact: + new_app_nodes: [] + + - name: Filter new app nodes out of registered_nodes + include: filter_out_new_app_nodes.yaml + with_items: "{{ registered_nodes }}" + loop_control: + loop_var: node + + - name: Add new app nodes to the new_nodes section (if a deployment already exists) + with_items: "{{ new_app_nodes }}" + add_host: + 
name: "{{ item.name }}" + groups: new_nodes, app + + - name: Add the rest of cluster nodes to their corresponding groups + with_items: "{{ registered_nodes }}" + add_host: + name: '{{ item.name }}' + groups: '{{ item.metadata.group }}' + - name: Add bastion node to inventory add_host: name: bastion diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 index 8863fb7c4..9dfbe3a5b 100644 --- a/roles/static_inventory/templates/inventory.j2 +++ b/roles/static_inventory/templates/inventory.j2 @@ -40,6 +40,7 @@ dns nodes etcd lb +new_nodes # Set variables common for all OSEv3 hosts [OSEv3:vars] @@ -78,6 +79,8 @@ dns.{{ stack_name }} [lb:children] lb.{{ stack_name }} +[new_nodes:children] + # Empty placeholders for all groups of the cluster nodes [masters.{{ stack_name }}] [etcd.{{ stack_name }}] @@ -86,6 +89,7 @@ lb.{{ stack_name }} [app.{{ stack_name }}] [dns.{{ stack_name }}] [lb.{{ stack_name }}] +[new_nodes.{{ stack_name }}] # BEGIN Autogenerated groups {% for group in groups %} -- cgit v1.2.3 From 4669bf33d611555613dec904b1b33a1908f0a35b Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 26 Sep 2017 14:36:12 +0200 Subject: Fix public master cluster DNS record when using bastion (#752) When using a bastion and a single master, add the bastion node's public IP the public master's IP for the DNS record. Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/post-provision-openstack.yml | 1 + roles/dns-records/defaults/main.yml | 2 ++ roles/dns-records/tasks/main.yml | 9 +++++++++ 3 files changed, 12 insertions(+) create mode 100644 roles/dns-records/defaults/main.yml diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index 61f950c14..a80e8d829 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -58,6 +58,7 @@ become: False roles: - role: dns-records + use_bastion: "{{ openstack_use_bastion|default(False)|bool }}" - role: infra-ansible/roles/dns - name: Switch the stack subnet to the configured private DNS server diff --git a/roles/dns-records/defaults/main.yml b/roles/dns-records/defaults/main.yml new file mode 100644 index 000000000..3f7fa783f --- /dev/null +++ b/roles/dns-records/defaults/main.yml @@ -0,0 +1,2 @@ +--- +use_bastion: False diff --git a/roles/dns-records/tasks/main.yml b/roles/dns-records/tasks/main.yml index 305a55195..7148b016a 100644 --- a/roles/dns-records/tasks/main.yml +++ b/roles/dns-records/tasks/main.yml @@ -70,6 +70,15 @@ when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - openstack_num_masters == 1 + - not use_bastion|bool + +- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + - use_bastion|bool - name: "Add public master cluster hostname records to the public A records (multi-master)" set_fact: -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit 
v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 15be1ebcf1705bc5e9347463594f50cc9d0f27b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Mon, 2 Oct 2017 21:08:45 +0000 Subject: Adding the option to use 'stack_state' to allow for easy de-provisioning (#754) * Adding 'openstack-stack-delete' role to allow for easy de-provisioning * Updated per etsauer's comments --- roles/openstack-stack/defaults/main.yml | 3 ++ roles/openstack-stack/tasks/cleanup.yml | 6 +++ roles/openstack-stack/tasks/generate-templates.yml | 32 +++++++++++++ roles/openstack-stack/tasks/main.yml | 54 ++++++---------------- 4 files changed, 56 insertions(+), 39 deletions(-) create mode 100644 roles/openstack-stack/tasks/cleanup.yml create mode 100644 roles/openstack-stack/tasks/generate-templates.yml diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index c16b5dc00..6f1949286 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -1,4 +1,7 @@ --- + +stack_state: 'present' + ssh_ingress_cidr: 0.0.0.0/0 node_ingress_cidr: 0.0.0.0/0 master_ingress_cidr: 0.0.0.0/0 diff --git a/roles/openstack-stack/tasks/cleanup.yml b/roles/openstack-stack/tasks/cleanup.yml new file mode 100644 index 000000000..258334a6b --- /dev/null +++ b/roles/openstack-stack/tasks/cleanup.yml @@ -0,0 +1,6 @@ +--- + +- name: cleanup temp files + file: + path: "{{ stack_template_pre.path }}" + state: absent diff --git a/roles/openstack-stack/tasks/generate-templates.yml b/roles/openstack-stack/tasks/generate-templates.yml new file mode 100644 index 000000000..0ce9a3eec --- /dev/null +++ b/roles/openstack-stack/tasks/generate-templates.yml @@ -0,0 +1,32 @@ +--- +- name: create HOT stack template prefix + register: stack_template_pre + tempfile: + state: directory + prefix: casl-ansible + +- name: set template paths + set_fact: + stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" + user_data_template_path: "{{ stack_template_pre.path }}/user-data" + +- name: generate HOT stack template from jinja2 template + template: + src: heat_stack.yaml.j2 + dest: "{{ stack_template_path }}" + +- name: generate HOT server template from jinja2 template + template: + src: heat_stack_server.yaml.j2 + dest: "{{ stack_template_pre.path }}/server.yaml" + +- name: generate HOT server w/o floating IPs template from jinja2 template + template: + src: heat_stack_server_nofloating.yaml.j2 + dest: "{{ stack_template_pre.path }}/server_nofloating.yaml" + when: use_bastion|bool + +- name: generate user_data from jinja2 template + template: + src: user_data.j2 + dest: "{{ user_data_template_path }}" diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index 9b4855294..983567026 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -1,51 +1,27 @@ --- -- name: create HOT stack template prefix - register: stack_template_pre - tempfile: - state: directory - prefix: casl-ansible -- name: set template paths - set_fact: - stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" - user_data_template_path: "{{ stack_template_pre.path }}/user-data" +- name: Generate the templates + include: generate-templates.yml + when: + - stack_state == 'present' -- name: generate HOT stack template from jinja2 template - template: - src: heat_stack.yaml.j2 - dest: "{{ stack_template_path }}" - -- name: generate HOT server template from jinja2 template - template: - src: 
heat_stack_server.yaml.j2 - dest: "{{ stack_template_pre.path }}/server.yaml" - -- name: generate HOT server w/o floating IPs template from jinja2 template - template: - src: heat_stack_server_nofloating.yaml.j2 - dest: "{{ stack_template_pre.path }}/server_nofloating.yaml" - when: use_bastion|bool - -- name: generate user_data from jinja2 template - template: - src: user_data.j2 - dest: "{{ user_data_template_path }}" - -- name: create stack +- name: Handle the Stack (create/delete) ignore_errors: False register: stack_create os_stack: name: "{{ stack_name }}" - state: present - template: "{{ stack_template_path }}" + state: "{{ stack_state }}" + template: "{{ stack_template_path | default(omit) }}" wait: yes # NOTE(bogdando) OS::Neutron::Subnet doesn't support live updates for # dns_nameservers, so we can't do that for the "create stack" task. - include: subnet_update_dns_servers.yaml - when: private_dns_server is defined - -- name: cleanup temp files - file: - path: "{{ stack_template_pre.path }}" - state: absent + when: + - private_dns_server is defined + - stack_state == 'present' + +- name: CleanUp + include: cleanup.yml + when: + - stack_state == 'present' -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From c969394a52c311f1ff5cc2fc669276bc8e2b4e4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eduardo=20M=C3=ADnguez?= Date: Wed, 4 Oct 2017 03:31:56 +0200 Subject: Required variables to create dedicated lv (#766) * Required variables to create dedicated lv https://bugzilla.redhat.com/show_bug.cgi?id=1490910#c11 * Fixed lint and added distribution to checks --- roles/docker-storage-setup/defaults/main.yaml | 2 ++ roles/docker-storage-setup/tasks/main.yaml | 35 +++++++++++++++++----- .../templates/docker-storage-setup-dm.j2 | 4 +++ .../templates/docker-storage-setup-overlayfs.j2 | 6 ++++ .../templates/docker-storage-setup.j2 | 4 --- 5 files changed, 40 insertions(+), 11 deletions(-) create mode 100644 roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 create mode 100644 roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 delete mode 100644 roles/docker-storage-setup/templates/docker-storage-setup.j2 diff --git a/roles/docker-storage-setup/defaults/main.yaml b/roles/docker-storage-setup/defaults/main.yaml index e36f1b85a..062f543ad 100644 --- a/roles/docker-storage-setup/defaults/main.yaml +++ b/roles/docker-storage-setup/defaults/main.yaml @@ -3,3 +3,5 @@ docker_dev: "/dev/sdb" docker_vg: "docker-vol" docker_data_size: "95%VG" docker_dm_basesize: "3G" +container_root_lv_name: "dockerlv" +container_root_lv_mount_path: "/var/lib/docker" diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 7202bc46b..209062ca7 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -1,8 +1,29 @@ --- -- name: create the docker-storage-setup config file - template: - src: "{{ role_path }}/templates/docker-storage-setup.j2" - dest: /etc/sysconfig/docker-storage-setup - owner: root - group: root - mode: 0644 +- block: + - name: create the docker-storage config file + template: + src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + + when: + - ansible_distribution_version | version_compare('7.4', '>=') + - ansible_distribution == "RedHat" + +- block: + - name: create the docker-storage-setup 
config file + template: + src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + + when: + - ansible_distribution_version | version_compare('7.4', '<') + - ansible_distribution == "RedHat" + +- name: start docker + service: name=docker state=started enabled=true diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 b/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 new file mode 100644 index 000000000..b5869feff --- /dev/null +++ b/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 @@ -0,0 +1,4 @@ +DEVS="{{ docker_dev }}" +VG="{{ docker_vg }}" +DATA_SIZE="{{ docker_data_size }}" +EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 new file mode 100644 index 000000000..4bef865c8 --- /dev/null +++ b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 @@ -0,0 +1,6 @@ +DEVS="{{ docker_dev }}" +VG="{{ docker_vg }}" +DATA_SIZE="{{ docker_data_size }}" +STORAGE_DRIVER=overlay2 +CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}" +CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}" diff --git a/roles/docker-storage-setup/templates/docker-storage-setup.j2 b/roles/docker-storage-setup/templates/docker-storage-setup.j2 deleted file mode 100644 index b5869feff..000000000 --- a/roles/docker-storage-setup/templates/docker-storage-setup.j2 +++ /dev/null @@ -1,4 +0,0 @@ -DEVS="{{ docker_dev }}" -VG="{{ docker_vg }}" -DATA_SIZE="{{ docker_data_size }}" -EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 2abe439cd35321e6388c25d5c8e4e6f1fa77e796 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98ystein=20Bedin?= Date: Wed, 4 Oct 2017 15:03:17 +0000 Subject: Fixing various contrib changes causing CASL breakage (#771) --- roles/openstack-stack/defaults/main.yml | 2 +- roles/openstack-stack/templates/heat_stack.yaml.j2 | 24 +++++++++++----------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml index 6f1949286..a24e684cc 100644 --- a/roles/openstack-stack/defaults/main.yml +++ b/roles/openstack-stack/defaults/main.yml @@ -18,4 +18,4 @@ dns_volume_size: 1 lb_volume_size: 5 use_bastion: False ui_ssh_tunnel: False -provider_network: None +provider_network: False diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 7acef5a6b..ef46211a4 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -499,7 +499,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ etcd_hostname }} + k8s_type: {{ etcd_hostname | default('etcd') }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -509,7 +509,7 @@ resources: k8s_type: etcds cluster_id: {{ stack_name }} type: etcd - image: {{ openstack_etcd_image }} + image: {{ openstack_etcd_image | default(openstack_image) }} flavor: {{ etcd_flavor }} key_name: {{ ssh_public_key }} {% if provider_network %} @@ -549,7 +549,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ 
stack_name }} - k8s_type: {{ lb_hostname }} + k8s_type: {{ lb_hostname | default('lb') }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -559,7 +559,7 @@ resources: k8s_type: lb cluster_id: {{ stack_name }} type: lb - image: {{ openstack_lb_image }} + image: {{ openstack_lb_image | default(openstack_image) }} flavor: {{ lb_flavor }} key_name: {{ ssh_public_key }} {% if provider_network %} @@ -603,7 +603,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ master_hostname }} + k8s_type: {{ master_hostname | default('master')}} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -613,7 +613,7 @@ resources: k8s_type: masters cluster_id: {{ stack_name }} type: master - image: {{ openstack_master_image }} + image: {{ openstack_master_image | default(openstack_image) }} flavor: {{ master_flavor }} key_name: {{ ssh_public_key }} {% if provider_network %} @@ -666,7 +666,7 @@ resources: template: sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ node_hostname }} + sub_type_k8s_type: {{ node_hostname | default('app-node') }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -681,7 +681,7 @@ resources: {% for k, v in openshift_cluster_node_labels.app.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_node_image }} + image: {{ openstack_node_image | default(openstack_image) }} flavor: {{ node_flavor }} key_name: {{ ssh_public_key }} {% if provider_network %} @@ -720,7 +720,7 @@ resources: template: sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ infra_hostname }} + sub_type_k8s_type: {{ infra_hostname | default('infranode') }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -735,7 +735,7 @@ resources: {% for k, v in openshift_cluster_node_labels.infra.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_infra_image }} + image: {{ openstack_infra_image | default(openstack_image) }} flavor: {{ infra_flavor }} key_name: {{ ssh_public_key }} {% if provider_network %} @@ -784,7 +784,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ dns_hostname }} + k8s_type: {{ dns_hostname | default('dns') }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -794,7 +794,7 @@ resources: k8s_type: dns cluster_id: {{ stack_name }} type: dns - image: {{ openstack_dns_image }} + image: {{ openstack_dns_image | default(openstack_image) }} flavor: {{ dns_flavor }} key_name: {{ ssh_public_key }} {% if provider_network %} -- cgit v1.2.3 From 51e017647815e10f61afcb0ac60985b4eeff24ca Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 2 Oct 2017 09:49:32 +0200 Subject: Add dynamic inventory This adds an `inventory.py` script to the `sample-inventory` that lists all the necessary servers and groups dynamically, skipping the `static_inventory` role as well as the `hosts` creation. It also adds an `os_cinder` lookup function which is necessary for a seamless Cinder OpenShift registry integration without a static inventory. 
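For context, a dynamic inventory script such as the `inventory.py` added below is consumed by pointing Ansible at the executable file instead of a static `hosts` file. A rough sketch (the paths are illustrative and assume `shade` is installed and your OpenStack credentials are loaded, for example from `keystonerc`):

    ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/inventory.py \
      openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml

Ansible executes the script, parses the JSON it emits and builds the `masters`, `etcd`, `nodes`, `infra_hosts`, `app`, `dns` and `lb` groups from the server metadata, as the code below shows.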
--- playbooks/provisioning/openstack/README.md | 13 ++++ .../openstack/sample-inventory/ansible.cfg | 3 + .../sample-inventory/group_vars/OSEv3.yml | 9 ++- .../openstack/sample-inventory/inventory.py | 89 ++++++++++++++++++++++ 4 files changed, 112 insertions(+), 2 deletions(-) create mode 100755 playbooks/provisioning/openstack/sample-inventory/inventory.py diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 5e45add51..b96c9c9db 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -361,6 +361,19 @@ registry. Again in `OSEv3.yml`: The filesystem value here will be used in the initial formatting of the volume. +If you're using the dynamic inventory, you must uncomment these two values as +well: + + #openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}" + #openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi" + +But note that they use the `os_cinder` lookup plugin we provide, so you must +tell Ansible where to find it either in `ansible.cfg` (the one we provide is +configured properly) or by exporting the +`ANSIBLE_LOOKUP_PLUGINS=openshift-ansible-contrib/lookup_plugins` environment +variable. + + ### Use an existing Cinder volume for the OpenShift registry diff --git a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg index 81d8ae10c..a21f023ea 100644 --- a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg +++ b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg @@ -1,6 +1,7 @@ # config file for ansible -- http://ansible.com/ # ============================================== [defaults] +ansible_user = openshift forks = 50 # work around privilege escalation timeouts in ansible timeout = 30 @@ -14,6 +15,8 @@ fact_caching_connection = .ansible/cached_facts fact_caching_timeout = 900 stdout_callback = skippy callback_whitelist = profile_tasks +lookup_plugins = openshift-ansible-contrib/lookup_plugins + [ssh_connection] ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 7d7683c62..2e897102e 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -27,9 +27,14 @@ openshift_hosted_registry_wait: True #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] #openshift_hosted_registry_storage_openstack_filesystem: xfs -## Configure this if you're attaching a Cinder volume you've set up. +## NOTE(shadower): This won't work until the openshift-ansible issue #5657 is fixed: +## https://github.com/openshift/openshift-ansible/issues/5657 ## If you're using the `cinder_hosted_registry_name` option from -## `all.yml`, this will be configured automaticaly. 
+## `all.yml`, uncomment these lines: +#openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}" +#openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi" + +## If you're using a Cinder volume you've set up yourself, uncomment these lines: #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05 #openshift_hosted_registry_storage_volume_size: 10Gi diff --git a/playbooks/provisioning/openstack/sample-inventory/inventory.py b/playbooks/provisioning/openstack/sample-inventory/inventory.py new file mode 100755 index 000000000..0b128ee40 --- /dev/null +++ b/playbooks/provisioning/openstack/sample-inventory/inventory.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python + +from __future__ import print_function + +import json +import os +import sys + +import shade + + +if __name__ == '__main__': + cloud = shade.openstack_cloud() + + inventory = {} + + # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER` + # environment variable. + cluster_hosts = [ + server for server in cloud.list_servers() + if 'metadata' in server and 'clusterid' in server.metadata] + + masters = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'master'] + + etcd = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'etcd'] + if not etcd: + etcd = masters + + infra_hosts = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'node' and + server.metadata['sub-host-type'] == 'infra'] + + app = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'node' and + server.metadata['sub-host-type'] == 'app'] + + nodes = list(set(masters + infra_hosts + app)) + + dns = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'dns'] + + lb = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'lb'] + + osev3 = list(set(nodes + etcd + lb)) + + groups = [server.metadata.group for server in cluster_hosts + if 'group' in server.metadata] + + inventory['cluster_hosts'] = { 'hosts': [s.name for s in cluster_hosts] } + inventory['OSEv3'] = { 'hosts': osev3 } + inventory['masters'] = { 'hosts': masters } + inventory['etcd'] = { 'hosts': etcd } + inventory['nodes'] = { 'hosts': nodes } + inventory['infra_hosts'] = { 'hosts': infra_hosts } + inventory['app'] = { 'hosts': app } + inventory['dns'] = { 'hosts': dns } + inventory['lb'] = { 'hosts': lb } + + for server in cluster_hosts: + if 'group' in server.metadata: + group = server.metadata.group + if group not in inventory: + inventory[group] = {'hosts': []} + inventory[group]['hosts'].append(server.name) + + inventory['_meta'] = { 'hostvars': {} } + + for server in cluster_hosts: + ssh_ip_address = server.public_v4 or server.private_v4 + vars = { + 'ansible_host': ssh_ip_address + } + + if server.public_v4: + vars['public_v4'] = server.public_v4 + # TODO(shadower): what about multiple networks? 
+ if server.private_v4: + vars['private_v4'] = server.private_v4 + + node_labels = server.metadata.get('node_labels') + if node_labels: + vars['openshift_node_labels'] = node_labels + + inventory['_meta']['hostvars'][server.name] = vars + + print(json.dumps(inventory, indent=4, sort_keys=True)) -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 181b8f6c82fe7f135b563edb74a39a44d279e32e Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 5 Oct 2017 10:26:47 +0200 Subject: Fix flake8 errors --- .../openstack/sample-inventory/inventory.py | 30 ++++++++++------------ 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/playbooks/provisioning/openstack/sample-inventory/inventory.py b/playbooks/provisioning/openstack/sample-inventory/inventory.py index 0b128ee40..4949deac2 100755 --- a/playbooks/provisioning/openstack/sample-inventory/inventory.py +++ b/playbooks/provisioning/openstack/sample-inventory/inventory.py @@ -3,8 +3,6 @@ from __future__ import print_function import json -import os -import sys import shade @@ -19,7 +17,7 @@ if __name__ == '__main__': cluster_hosts = [ server for server in cloud.list_servers() if 'metadata' in server and 'clusterid' in server.metadata] - + masters = [server.name for server in cluster_hosts if server.metadata['host-type'] == 'master'] @@ -30,11 +28,11 @@ if __name__ == '__main__': infra_hosts = [server.name for server in cluster_hosts if server.metadata['host-type'] == 'node' and - server.metadata['sub-host-type'] == 'infra'] + server.metadata['sub-host-type'] == 'infra'] app = [server.name for server in cluster_hosts if server.metadata['host-type'] == 'node' and - server.metadata['sub-host-type'] == 'app'] + server.metadata['sub-host-type'] == 'app'] nodes = list(set(masters + infra_hosts + app)) @@ -42,22 +40,22 @@ if __name__ == '__main__': if server.metadata['host-type'] == 'dns'] lb = [server.name for server in cluster_hosts - if server.metadata['host-type'] == 'lb'] + if server.metadata['host-type'] == 'lb'] osev3 = list(set(nodes + etcd + lb)) groups = [server.metadata.group for server in cluster_hosts if 'group' in server.metadata] - inventory['cluster_hosts'] = { 'hosts': [s.name for s in cluster_hosts] } - inventory['OSEv3'] = { 'hosts': osev3 } - inventory['masters'] = { 'hosts': masters } - inventory['etcd'] = { 'hosts': etcd } - inventory['nodes'] = { 'hosts': nodes } - inventory['infra_hosts'] = { 'hosts': infra_hosts } - inventory['app'] = { 'hosts': app } - inventory['dns'] = { 'hosts': dns } - inventory['lb'] = { 'hosts': lb } + inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]} + inventory['OSEv3'] = {'hosts': osev3} + inventory['masters'] = {'hosts': masters} + inventory['etcd'] = {'hosts': etcd} + inventory['nodes'] = {'hosts': nodes} + inventory['infra_hosts'] = {'hosts': infra_hosts} + inventory['app'] = {'hosts': app} + inventory['dns'] = {'hosts': dns} + inventory['lb'] = {'hosts': lb} for server in cluster_hosts: if 'group' in server.metadata: @@ -66,7 +64,7 @@ if __name__ == '__main__': inventory[group] = {'hosts': []} inventory[group]['hosts'].append(server.name) - inventory['_meta'] = { 'hostvars': {} } + inventory['_meta'] = {'hostvars': {}} for server in cluster_hosts: ssh_ip_address = server.public_v4 or server.private_v4 -- cgit v1.2.3 From 3fb3db798d7f3d890f063315c8174e7252b9c054 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 5 Oct 2017 12:36:03 +0200 Subject: Set public_v4 to private_v4 if it doesn't exist The DNS code expects a `public_v4` even when we use the provider networks. 
Let's just always export it. --- playbooks/provisioning/openstack/sample-inventory/inventory.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/playbooks/provisioning/openstack/sample-inventory/inventory.py b/playbooks/provisioning/openstack/sample-inventory/inventory.py index 4949deac2..6a1b74b3d 100755 --- a/playbooks/provisioning/openstack/sample-inventory/inventory.py +++ b/playbooks/provisioning/openstack/sample-inventory/inventory.py @@ -72,8 +72,9 @@ if __name__ == '__main__': 'ansible_host': ssh_ip_address } - if server.public_v4: - vars['public_v4'] = server.public_v4 + public_v4 = server.public_v4 or server.private_v4 + if public_v4: + vars['public_v4'] = public_v4 # TODO(shadower): what about multiple networks? if server.private_v4: vars['private_v4'] = server.private_v4 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 1c73318927fe1730fa4c52fc684a94d37d12a5fd Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 6 Oct 2017 09:20:53 +0200 Subject: Replace the CASL references (#778) Following up on the initial port of the OpenStack roles from casl-ansible to openshift-ansible-contrib. One of the points that was brought up in the review was to drop the references to CASL in the code since the code has now wider reach. --- playbooks/provisioning/openstack/pre_tasks.yml | 2 +- roles/openstack-stack/README.md | 2 +- roles/openstack-stack/tasks/generate-templates.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/playbooks/provisioning/openstack/pre_tasks.yml b/playbooks/provisioning/openstack/pre_tasks.yml index 7146c886a..11fe2dd84 100644 --- a/playbooks/provisioning/openstack/pre_tasks.yml +++ b/playbooks/provisioning/openstack/pre_tasks.yml @@ -7,7 +7,7 @@ - name: Set default Environment ID set_fact: - default_env_id: "casl-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" + default_env_id: "openshift-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" delegate_to: localhost - name: Setting Common Facts diff --git a/roles/openstack-stack/README.md b/roles/openstack-stack/README.md index 509c9de6c..32a2b49f1 100644 --- a/roles/openstack-stack/README.md +++ b/roles/openstack-stack/README.md @@ -5,5 +5,5 @@ Role for spinning up instances using OpenStack Heat. ## To Test ``` -ansible-playbook casl-ansible/roles/openstack-stack/test/stack-create-test.yml +ansible-playbook openshift-ansible-contrib/roles/openstack-stack/test/stack-create-test.yml ``` diff --git a/roles/openstack-stack/tasks/generate-templates.yml b/roles/openstack-stack/tasks/generate-templates.yml index 0ce9a3eec..110da8444 100644 --- a/roles/openstack-stack/tasks/generate-templates.yml +++ b/roles/openstack-stack/tasks/generate-templates.yml @@ -3,7 +3,7 @@ register: stack_template_pre tempfile: state: directory - prefix: casl-ansible + prefix: openshift-ansible - name: set template paths set_fact: -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 5a6c1927828d942ebe4c71861521c0dae6545011 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 9 Oct 2017 09:57:08 +0000 Subject: Add CentOS support to the docker-storage-setup role This let's us use the role on CentOS systems, as well as RHEL. In addition, it installs docker and makes sure it's restarted (as opposed to just "started" which has no effect when docker is already running). 
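The role updated here is still driven by the same template variables (`docker_dev`, `docker_vg`, `docker_data_size`, `docker_dm_basesize`). A rough sketch of a wrapper play that would exercise the new CentOS branch; the `nodes` group name and all values are illustrative assumptions, not part of this commit:

```yaml
---
# Hypothetical wrapper play for the docker-storage-setup role
- hosts: nodes
  become: true
  vars:
    docker_dev: /dev/vdb          # block device handed to docker-storage-setup
    docker_vg: docker_vol         # volume group to create
    docker_data_size: "95%FREE"   # thin pool data size
    docker_dm_basesize: 3G        # per-container base size (device-mapper only)
  roles:
    - docker-storage-setup
```

Note that the CentOS branch deliberately sticks to the device-mapper template (see the TODO in the diff below about overlayfs support).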
--- roles/docker-storage-setup/tasks/main.yaml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 209062ca7..8dabb1cc7 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -25,5 +25,21 @@ - ansible_distribution_version | version_compare('7.4', '<') - ansible_distribution == "RedHat" +- block: + - name: create the docker-storage-setup config file for CentOS + template: + src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + + # TODO(shadower): Find out which CentOS version supports overlayfs2 + when: + - ansible_distribution == "CentOS" + +- name: Install Docker + package: name=docker state=present + - name: start docker - service: name=docker state=started enabled=true + service: name=docker state=restarted enabled=true -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From af9f352d64fba76fcaed7de4e2b35b44ddf10e2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eduardo=20M=C3=ADnguez?= Date: Tue, 10 Oct 2017 20:02:50 +0200 Subject: Fix for this issue https://bugzilla.redhat.com/show_bug.cgi?id=1495372 (#793) --- roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 index 4bef865c8..d8b4a0276 100644 --- a/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 +++ b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 @@ -4,3 +4,4 @@ DATA_SIZE="{{ docker_data_size }}" STORAGE_DRIVER=overlay2 CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}" CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}" +CONTAINER_ROOT_LV_SIZE=100%FREE -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From 4fff75f713f963e8ab1ec9b2302c3395d9c53ba2 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 12 Oct 2017 12:32:22 +0200 Subject: Streamline the OpenStack provider README This moves all the extra configuration options and deployment notes to a new `advanced-configuration.md` file and keeps the README much shorter. The README now presents the simplest workflow with minimal configuration and manual steps on part of the deployer. The advanced configuration is in need of a little more cleanup, but we can do that in another pull request. 
--- playbooks/provisioning/openstack/README.md | 712 ++++++--------------- .../openstack/advanced-configuration.md | 699 ++++++++++++++++++++ playbooks/provisioning/openstack/ansible.cfg | 24 + .../openstack/sample-inventory/ansible.cfg | 24 - .../sample-inventory/group_vars/OSEv3.yml | 4 +- 5 files changed, 907 insertions(+), 556 deletions(-) create mode 100644 playbooks/provisioning/openstack/advanced-configuration.md create mode 100644 playbooks/provisioning/openstack/ansible.cfg delete mode 100644 playbooks/provisioning/openstack/sample-inventory/ansible.cfg diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index b96c9c9db..a2f3d4d5d 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -1,608 +1,260 @@ # OpenStack Provisioning -This repository contains playbooks and Heat templates to provision +This directory contains [Ansible][ansible] playbooks and roles to create OpenStack resources (servers, networking, volumes, security groups, -etc.). The result is an environment ready for openshift-ansible. +etc.). The result is an environment ready for OpenShift installation +via [openshift-ansible]. -## Dependencies for localhost (ansible control/admin node) +We provide everything necessary to be able to install OpenShift on +OpenStack (including the DNS and load balancer servers when +necessary). In addition, we work on providing integration with the +OpenStack-native services (storage, lbaas, baremetal as a service, +dns, etc.). -* [Ansible 2.3](https://pypi.python.org/pypi/ansible) -* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps) -* [jinja2](http://jinja.pocoo.org/docs/2.9/) -* [shade](https://pypi.python.org/pypi/shade) -* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath) -* python-dns / [dnspython](https://pypi.python.org/pypi/dnspython) -* Become (sudo) is not required. -**NOTE**: You can use a Docker image with all dependencies set up. -Find more in the [Deployment section](#deployment). +## OpenStack Requirements -### Optional Dependencies for localhost -**Note**: When using rhel images, `rhel-7-server-openstack-10-rpms` repository is required in order to install these packages. +Before you start the installation, you need to have an OpenStack +environment to connect to. You can use a public cloud or an OpenStack +within your organisation. It is also possible to +use [Devstack][devstack] or [TripleO][tripleo]. In the case of +TripleO, we will be running on top of the **overcloud**. -* `python-openstackclient` -* `python-heatclient` +The OpenStack release must be Newton (for Red Hat OpenStack this is +version 10) or newer. It must also satisfy these requirements: -## Dependencies for OpenStack hosted cluster nodes (servers) +* Heat (Orchestration) must be available +* The deployment image (CentOS 7 or RHEL 7) must be loaded +* The deployment flavor must be available to your user + - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing + - look at + the [Minimum Hardware Requirements page][hardware-requirements] + for production +* The keypair for SSH must be available in openstack +* `keystonerc` file that lets you talk to the openstack services + * NOTE: only Keystone V2 is currently supported -There are no additional dependencies for the cluster nodes. Required -configuration steps are done by Heat given a specific user data config -that normally should not be changed. 
+Optional: +* External Neutron network with a floating IP address pool -## Required galaxy modules -In order to pull in external dependencies for DNS configuration steps, -the following commads need to be executed: +## Installation - ansible-galaxy install \ - -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \ - -p openshift-ansible-contrib/roles +There are four main parts to the installation: -Alternatively you can install directly from github: +1. [Preparing Ansible and dependencies](#1-preparing-ansible-and-dependencies) +2. [Configuring the desired OpenStack environment and OpenShift cluster](#2-configuring-the-openstack-environment-and-openshift-cluster) +3. [Creating the OpenStack resources (VMs, networking, etc.)](#3-creating-the-openstack-resources-vms-networking-etc) +4. [Installing OpenShift](#4-installing-openshift) - ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \ - -p openshift-ansible-contrib/roles +This guide is going to install [OpenShift Origin][origin] +with [CentOS 7][centos7] images with minimal customisation. -Notes: -* This assumes we're in the directory that contains the clonned -openshift-ansible-contrib repo in its root path. -* When trying to install a different version, the previous one must be removed first -(`infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)). -Otherwise, even if there are differences between the two versions, installation of the newer version is skipped. +We will create the VMs for running OpenShift, in a new Neutron +network, assign Floating IP addresses and configure DNS. -## What does it do +The OpenShift cluster will have a single Master node that will run +`etcd`, a single Infra node and two App nodes. -* Create Nova servers with floating IP addresses attached -* Assigns Cinder volumes to the servers -* Set up an `openshift` user with sudo privileges -* Optionally attach Red Hat subscriptions -* Sets up a bind-based DNS server or configures the cluster servers to use an external DNS server. -* Supports mixed in-stack/external DNS servers for dynamic updates. -* When deploying more than one master, sets up a HAproxy server +You can look at +the [Advanced Configuration page][advanced-configuration] for +additional options. -## Set up -### Copy the sample inventory +### 1. Preparing Ansible and dependencies - cp -r openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory inventory +First, you need to select where to run [Ansible][ansible] from (the +*Ansible host*). This can be the computer you read this guide on or an +OpenStack VM you'll create specifically for this purpose. -### Copy ansible config +We will use +a +[Docker image that has all the dependencies installed][control-host-image] to +make things easier. If you don't want to use Docker, take a look at +the [Ansible host dependencies][ansible-dependencies] and make sure +they're installed. - cp openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/ansible.cfg ansible.cfg +Your *Ansible host* needs to have the following: -### Update `inventory/group_vars/all.yml` +1. Docker +2. `keystonerc` file with your OpenStack credentials +3. 
SSH private key for logging in to your OpenShift nodes -#### DNS configuration variables +Assuming your private key is `~/.ssh/id_rsa` and `keystonerc` in your +current directory: -Pay special attention to the values in the first paragraph -- these -will depend on your OpenStack environment. - -Note that the provsisioning playbooks update the original Neutron subnet -created with the Heat stack to point to the configured DNS servers. -So the provisioned cluster nodes will start using those natively as -default nameservers. Technically, this allows to deploy OpenShift clusters -without dnsmasq proxies. - -The `env_id` and `public_dns_domain` will form the cluster's DNS domain all -your servers will be under. With the default values, this will be -`openshift.example.com`. For workloads, the default subdomain is 'apps'. -That sudomain can be set as well by the `openshift_app_domain` variable in -the inventory. - -The `openstack__hostname` is a set of variables used for customising -hostnames of servers with a given role. When such a variable stays commented, -default hostname (usually the role name) is used. - -The `public_dns_nameservers` is a list of DNS servers accessible from all -the created Nova servers. These will be serving as your DNS forwarders for -external FQDNs that do not belong to the cluster's DNS domain and its subdomains. -If you're unsure what to put in here, you can try the google or opendns servers, -but note that some organizations may be blocking them. - -The `openshift_use_dnsmasq` controls either dnsmasq is deployed or not. -By default, dnsmasq is deployed and comes as the hosts' /etc/resolv.conf file -first nameserver entry that points to the local host instance of the dnsmasq -daemon that in turn proxies DNS requests to the authoritative DNS server. -When Network Manager is enabled for provisioned cluster nodes, which is -normally the case, you should not change the defaults and always deploy dnsmasq. - -`external_nsupdate_keys` describes an external authoritative DNS server(s) -processing dynamic records updates in the public and private cluster views: - - external_nsupdate_keys: - public: - key_secret: - key_algorithm: 'hmac-md5' - key_name: 'update-key' - server: - private: - key_secret: - key_algorithm: 'hmac-sha256' - server: - -Here, for the public view section, we specified another key algorithm and -optional `key_name`, which normally defaults to the cluster's DNS domain. -This just illustrates a compatibility mode with a DNS service deployed -by OpenShift on OSP10 reference architecture, and used in a mixed mode with -another external DNS server. - -Another example defines an external DNS server for the public view -additionally to the in-stack DNS server used for the private view only: - - external_nsupdate_keys: - public: - key_secret: - key_algorithm: 'hmac-sha256' - server: - -Here, updates matching the public view will be hitting the given public -server IP. While updates matching the private view will be sent to the -auto evaluated in-stack DNS server's **public** IP. - -Note, for the in-stack DNS server, private view updates may be sent only -via the public IP of the server. You can not send updates via the private -IP yet. This forces the in-stack private server to have a floating IP. -See also the [security notes](#security-notes) - -#### Other configuration variables - -`openstack_ssh_key` is a Nova keypair - you can see your keypairs with -`openstack keypair list`. 
This guide assumes that its corresponding private -key is `~/.ssh/openshift`, stored on the ansible admin (control) node. - -`openstack_default_image_name` is the default name of the Glance image the -servers will use. You can see your images with `openstack image list`. -In order to set a different image for a role, uncomment the line with the -corresponding variable (e.g. `openstack_lb_image_name` for load balancer) and -set its value to another available image name. `openstack_default_image_name` -must stay defined as it is used as a default value for the rest of the roles. - -`openstack_default_flavor` is the default Nova flavor the servers will use. -You can see your flavors with `openstack flavor list`. -In order to set a different flavor for a role, uncomment the line with the -corresponding variable (e.g. `openstack_lb_flavor` for load balancer) and -set its value to another available flavor. `openstack_default_flavor` must -stay defined as it is used as a default value for the rest of the roles. - -`openstack_external_network_name` is the name of the Neutron network -providing external connectivity. It is often called `public`, -`external` or `ext-net`. You can see your networks with `openstack -network list`. - -`openstack_private_network_name` is the name of the private Neutron network -providing admin/control access for ansible. It can be merged with other -cluster networks, there are no special requirements for networking. - -The `openstack_num_masters`, `openstack_num_infra` and -`openstack_num_nodes` values specify the number of Master, Infra and -App nodes to create. - -The `openshift_cluster_node_labels` defines custom labels for your openshift -cluster node groups. It currently supports app and infra node groups. -The default value of this variable sets `region: primary` to app nodes and -`region: infra` to infra nodes. -An example of setting a customised label: -``` -openshift_cluster_node_labels: - app: - mylabel: myvalue +```bash +$ sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \ + -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \ + redhatcop/control-host-openstack bash ``` -The `openstack_nodes_to_remove` allows you to specify the numerical indexes -of App nodes that should be removed; for example, ['0', '2'], - -The `docker_volume_size` is the default Docker volume size the servers will use. -In order to set a different volume size for a role, -uncomment the line with the corresponding variable (e. g. `docker_master_volume_size` -for master) and change its value. `docker_volume_size` must stay defined as it is -used as a default value for some of the servers (master, infra, app node). -The rest of the roles (etcd, load balancer, dns) have their defaults hard-coded. - -**Note**: If the `ephemeral_volumes` is set to `true`, the `*_volume_size` variables -will be ignored and the deployment will not create any cinder volumes. - -The `openstack_flat_secgrp`, controls Neutron security groups creation for Heat -stacks. Set it to true, if you experience issues with sec group rules -quotas. It trades security for number of rules, by sharing the same set -of firewall rules for master, node, etcd and infra nodes. - -The `required_packages` variable also provides a list of the additional -prerequisite packages to be installed before to deploy an OpenShift cluster. -Those are ignored though, if the `manage_packages: False`. - -The `openstack_inventory` controls either a static inventory will be created after the -cluster nodes provisioned on OpenStack cloud. 
Note, the fully dynamic inventory -is yet to be supported, so the static inventory will be created anyway. - -The `openstack_inventory_path` points the directory to host the generated static inventory. -It should point to the copied example inventory directory, otherwise ti creates -a new one for you. - -#### Multi-master configuration - -Please refer to the official documentation for the -[multi-master setup](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#multiple-masters) -and define the corresponding [inventory -variables](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#configuring-cluster-variables) -in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node -under the ansible group named `ext_lb`: - - openshift_master_cluster_method: native - openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}" - openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}" - -#### Provider Network - -Normally, the playbooks create a new Neutron network and subnet and attach -floating IP addresses to each node. If you have a provider network set up, this -is all unnecessary as you can just access servers that are placed in the -provider network directly. - -To use a provider network, set its name in `openstack_provider_network_name` in -`inventory/group_vars/all.yml`. - -If you set the provider network name, the `openstack_external_network_name` and -`openstack_private_network_name` fields will be ignored. - -**NOTE**: this will not update the nodes' DNS, so running openshift-ansible -right after provisioning will fail (unless you're using an external DNS server -your provider network knows about). You must make sure your nodes are able to -resolve each other by name. - -#### Security notes - -Configure required `*_ingress_cidr` variables to restrict public access -to provisioned servers from your laptop (a /32 notation should be used) -or your trusted network. The most important is the `node_ingress_cidr` -that restricts public access to the deployed DNS server and cluster -nodes' ephemeral ports range. - -Note, the command ``curl https://api.ipify.org`` helps fiding an external -IP address of your box (the ansible admin node). - -There is also the `manage_packages` variable (defaults to True) you -may want to turn off in order to speed up the provisioning tasks. This may -be the case for development environments. When turned off, the servers will -be provisioned omitting the ``yum update`` command. This brings security -implications though, and is not recommended for production deployments. - -##### DNS servers security options - -Aside from `node_ingress_cidr` restricting public access to in-stack DNS -servers, there are following (bind/named specific) DNS security -options available: - - named_public_recursion: 'no' - named_private_recursion: 'yes' - -External DNS servers, which is not included in the 'dns' hosts group, -are not managed. It is up to you to configure such ones. - -### Configure the OpenShift parameters - -Finally, you need to update the DNS entry in -`inventory/group_vars/OSEv3.yml` (look at -`openshift_master_default_subdomain`). - -In addition, this is the place where you can customise your OpenShift -installation for example by specifying the authentication. 
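One concrete customisation of this kind is an htpasswd-backed identity provider in `inventory/group_vars/OSEv3.yml`. The snippet below is only a sketch in the style of the sample inventory linked below; the provider name and file path are placeholders, and the exact accepted keys depend on your openshift-ansible version:

```yaml
openshift_master_identity_providers:
  - name: htpasswd_auth
    kind: HTPasswdPasswordIdentityProvider
    login: 'true'
    challenge: 'true'
    filename: /etc/origin/master/htpasswd
```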
- -The full list of options is available in this sample inventory: - -https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example - -Note, that in order to deploy OpenShift origin, you should update the following -variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`: - - deployment_type: origin - openshift_deployment_type: "{{ deployment_type }}" - - -#### Setting a custom entrypoint - -In order to set a custom entrypoint, update `openshift_master_cluster_public_hostname` - - openshift_master_cluster_public_hostname: api.openshift.example.com - -Note than an empty hostname does not work, so if your domain is `openshift.example.com`, -you cannot set this value to simply `openshift.example.com`. - -### Creating and using a Cinder volume for the OpenShift registry - -You can optionally have the playbooks create a Cinder volume and set -it up as the OpenShift hosted registry. - -To do that you need specify the desired Cinder volume name and size in -Gigabytes in `inventory/group_vars/all.yml`: - - cinder_hosted_registry_name: cinder-registry - cinder_hosted_registry_size_gb: 10 - -With this, the playbooks will create the volume and set up its -filesystem. If there is an existing volume of the same name, we will -use it but keep the existing data on it. - -To use the volume for the registry, you must first configure it with -the OpenStack credentials by putting the following to `OSEv3.yml`: - - openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}" - openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" - openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" - openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" - -This will use the credentials from your shell environment. If you want -to enter them explicitly, you can. You can also use credentials -different from the provisioning ones (say for quota or access control -reasons). - -**NOTE**: If you're testing this on (DevStack)[devstack], you must -explicitly set your Keystone API version to v2 (e.g. -`OS_AUTH_URL=http://10.34.37.47/identity/v2.0`) instead of the default -value provided by `openrc`. You may also encounter the following issue -with Cinder: - -https://github.com/kubernetes/kubernetes/issues/50461 - -You can read the (OpenShift documentation on configuring -OpenStack)[openstack] for more information. - -[devstack]: https://docs.openstack.org/devstack/latest/ -[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html - - -Next, we need to instruct OpenShift to use the Cinder volume for it's -registry. Again in `OSEv3.yml`: - - #openshift_hosted_registry_storage_kind: openstack - #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] - #openshift_hosted_registry_storage_openstack_filesystem: xfs -The filesystem value here will be used in the initial formatting of -the volume. +This will create the container, add your SSH key and source your +`keystonerc`. It should be set up for the installation. 
-If you're using the dynamic inventory, you must uncomment these two values as -well: +You can verify that everything is in order: - #openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}" - #openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi" -But note that they use the `os_cinder` lookup plugin we provide, so you must -tell Ansible where to find it either in `ansible.cfg` (the one we provide is -configured properly) or by exporting the -`ANSIBLE_LOOKUP_PLUGINS=openshift-ansible-contrib/lookup_plugins` environment -variable. - - - -### Use an existing Cinder volume for the OpenShift registry - -You can also use a pre-existing Cinder volume for the storage of your -OpenShift registry. - -To do that, you need to have a Cinder volume. You can create one by -running: - - openstack volume create --size - -The volume needs to have a file system created before you put it to -use. - -As with the automatically-created volume, you have to set up the -OpenStack credentials in `inventory/group_vars/OSEv3.yml` as well as -registry values: - - #openshift_hosted_registry_storage_kind: openstack - #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] - #openshift_hosted_registry_storage_openstack_filesystem: xfs - #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05 - #openshift_hosted_registry_storage_volume_size: 10Gi - -Note the `openshift_hosted_registry_storage_openstack_volumeID` and -`openshift_hosted_registry_storage_volume_size` values: these need to -be added in addition to the previous variables. - -The **Cinder volume ID**, **filesystem** and **volume size** variables -must correspond to the values in your volume. The volume ID must be -the **UUID** of the Cinder volume, *not its name*. - -We can do formate the volume for you if you ask for it in -`inventory/group_vars/all.yml`: - - prepare_and_format_registry_volume: true - -**NOTE:** doing so **will destroy any data that's currently on the volume**! - -You can also run the registry setup playbook directly: - - ansible-playbook -i inventory playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml - -(the provisioning phase must be completed, first) - - - -### Configure static inventory and access via a bastion node - -Example inventory variables: - - openstack_use_bastion: true - bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24" - openstack_private_ssh_key: ~/.ssh/openshift - openstack_inventory: static - openstack_inventory_path: ../../../../inventory - openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com - -The `openstack_subnet_prefix` is the openstack private network for your cluster. -And the `bastion_ingress_cidr` defines accepted range for SSH connections to nodes -additionally to the `ssh_ingress_cidr`` (see the security notes above). - -The SSH config will be stored on the ansible control node by the -gitven path. Ansible uses it automatically. To access the cluster nodes with -that ssh config, use the `-F` prefix, f.e.: - - ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK - -Note, relative paths will not work for the `openstack_ssh_config_path`, but it -works for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this -guide, the latter points to the current directory, where you run ansible commands -from. 
+```bash +$ less .ssh/id_rsa +$ ansible --version +$ openstack image list +``` -To verify nodes connectivity, use the command: - ansible -v -i inventory/hosts -m ping all +### 2. Configuring the OpenStack Environment and OpenShift Cluster -If something is broken, double-check the inventory variables, paths and the -generated `/hosts` and `openstack_ssh_config_path` files. +The configuration is all done in an Ansible inventory directory. We +will clone the [openshift-ansible-contrib][contrib] repository and set +things up for a minimal installation. -The `inventory: dynamic` can be used instead to access cluster nodes directly via -floating IPs. In this mode you can not use a bastion node and should specify -the dynamic inventory file in your ansible commands , like `-i openstack.py`. -## Deployment +``` +$ git clone https://github.com/openshift/openshift-ansible-contrib +$ cp -r openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/ inventory +``` -### Using Docker on the Ansible host +If you're testing multiple configurations, you can have multiple +inventories and switch between them. -If you don't want to worry about the dependencies, you can use the -[OpenStack Control Host image][control-host-image]. +#### OpenStack Configuration -[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/ +The OpenStack configuration is in `inventory/group_vars/all.yml`. -It has all the dependencies installed, but you'll need to map your -code and credentials to it. Assuming your SSH keys live in `~/.ssh` -and everything else is in your current directory (i.e. `ansible.cfg`, -`keystonerc`, `inventory`, `openshift-ansible`, -`openshift-ansible-contrib`), this is how you run the deployment: +Open the file and plug in the image, flavor and network configuration +corresponding to your OpenStack installation. - sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \ - -v $PWD:/root/openshift:Z \ - -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \ - redhatcop/control-host-openstack bash - -(feel free to replace `$PWD` with an actual path to your inventory and -checkouts, but note that relative paths don't work) +```bash +$ vi inventory/group_vars/all.yml +``` -The first run may take a few minutes while the image is being -downloaded. After that, you'll be inside the container and you can run -the playbooks: +1. Set the `openstack_ssh_public_key` to your OpenStack keypair name. + - See `openstack keypair list` to find the keypairs registered with + OpenShift. + - This must correspond to your private SSH key in `~/.ssh/id_rsa` +2. Set the `openstack_external_network_name` to the floating IP + network of your openstack. + - See `openstack network list` for the list of networks. + - It's often called `public`, `external` or `ext-net`. +3. Set the `openstack_default_image_name` to the image you want your + OpenShift VMs to run. + - See `openstack image list` for the list of available images. +4. Set the `openstack_default_flavor` to the flavor you want your + OpenShift VMs to use. + - See `openstack flavor list` for the list of available flavors. + +**NOTE**: In most OpenStack environments, you will also need to +configure the forwarders for the DNS server we create. This depends on +your environment. + +Launch a VM in your OpenStack and look at its `/etc/resolv.conf` and +put the IP addresses into `public_dns_nameservers` in +`inventory/group_vars/all.yml`. 
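To make the numbered steps above concrete, a filled-in `inventory/group_vars/all.yml` might look roughly like this. Every value below is a placeholder drawn from the examples in this guide and must be replaced with names that actually exist in your cloud:

```yaml
# Sketch only -- substitute values from your own OpenStack project
openstack_ssh_public_key: openshift       # from `openstack keypair list`
openstack_external_network_name: public   # from `openstack network list`
openstack_default_image_name: CentOS-7    # from `openstack image list`
openstack_default_flavor: m1.medium       # from `openstack flavor list`

# Forwarders for the DNS server, taken from a test VM's /etc/resolv.conf
public_dns_nameservers:
  - 192.168.0.3
  - 192.168.0.2
```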
- cd openshift - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml +#### OpenShift configuration -### Run the playbook +The OpenShift configuration is in `inventory/group_vars/OSEv3.yml`. -Assuming your OpenStack (Keystone) credentials are in the `keystonerc` -this is how you stat the provisioning process from your ansible control node: +The default options will mostly work, but unless you used the large +flavors for a production-ready environment, openshift-ansible's +hardware check will fail. - . keystonerc - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml +Let's disable those checks by putting this in +`inventory/group_vars/OSEv3.yml`: -Note, here you start with an empty inventory. The static inventory will be populated -with data so you can omit providing additional arguments for future ansible commands. +```yaml +openshift_disable_check: disk_availability,memory_availability +``` -If bastion enabled, the generates SSH config must be applied for ansible. -Otherwise, it is auto included by the previous step. In order to execute it -as a separate playbook, use the following command: +**NOTE**: The default authentication method will allow **any username +and password** in! If you're running this in a public place, you need +to set up access control. - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml +Feel free to look at +the [Sample OpenShift Inventory][sample-openshift-inventory] and +the [advanced configuration][advanced-configuration]. -The first infra node then becomes a bastion node as well and proxies access -for future ansible commands. The post-provision step also configures Satellite, -if requested, and DNS server, and ensures other OpenShift requirements to be met. -### Running Custom Post-Provision Actions +### 3. Creating the OpenStack resources (VMs, networking, etc.) -A custom playbook can be run like this: +We will install the DNS server roles using ansible galaxy and then run +the openstack provisioning playbook. The `ansible.cfg` file we provide +has useful defaults -- copy it to the directory you're going to run +Ansible from. +```bash +$ ansible-galaxy install -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml -p openshift-ansible-contrib/roles +$ cp openshift-ansible-contrib/playbooks/provisioning/openstack/ansible.cfg ansible.cfg ``` -ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -``` +(you will only need to do this once) -If you'd like to limit the run to one particular host, you can do so as follows: +Then run the provisioning playbook -- this will create the OpenStack +resources: -``` -ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -l app-node-0.openshift.example.com +```bash +$ ansible-playbook -i inventory openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml ``` -You can also create your own custom playbook. 
Here's one example that adds additional YUM repositories: - -``` ---- -- hosts: app - tasks: - - # enable EPL - - name: Add repository - yum_repository: - name: epel - description: EPEL YUM repo - baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/ -``` +If you're using multiple inventories, make sure you pass the path to +the right one to `-i`. -This example runs against app nodes. The list of options include: - - cluster_hosts (all hosts: app, infra, masters, dns, lb) - - OSEv3 (app, infra, masters) - - app - - dns - - masters - - infra_hosts -Please consider contributing your custom playbook back to openshift-ansible-contrib! +### 4. Installing OpenShift -A library of custom post-provision actions exists in `openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions`. Playbooks include: +We will use the `openshift-ansible` project to install openshift on +top of the OpenStack nodes we have prepared: -##### add-yum-repos.yml - -[add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml) adds a list of custom yum repositories to every node in the cluster. - -### Install OpenShift - -Once it succeeds, you can install openshift by running: - - ansible-playbook openshift-ansible/playbooks/byo/config.yml - -### Access UI - -OpenShift UI may be accessed via the 1st master node FQDN, port 8443. - -When using a bastion, you may want to make an SSH tunnel from your control node -to access UI on the `https://localhost:8443`, with this inventory variable: - - openshift_ui_ssh_tunnel: True +```bash +$ git clone https://github.com/openshift/openshift-ansible +$ ansible-playbook -i inventory openshift-ansible/playbooks/byo/config.yml +``` -Note, this requires sudo rights on the ansible control node and an absolute path -for the `openstack_private_ssh_key`. You should also update the control node's -`/etc/hosts`: - 127.0.0.1 master-0.openshift.example.com +### Next Steps -In order to access UI, the ssh-tunnel service will be created and started on the -control node. Make sure to remove these changes and the service manually, when not -needed anymore. +And that's it! You should have a small but functional OpenShift +cluster now. -## Scale Deployment up/down +Take a look at [how to access the cluster][accessing-openshift] +and [how to remove it][uninstall-openshift] as well as the more +advanced configuration: -### Scaling up +* [Accessing the OpenShift cluster][accessing-openshift] +* [Removing the OpenShift cluster][uninstall-openshift] +* Set Up Authentication (TODO) +* [Multiple Masters with a load balancer][loadbalancer] +* [External Dns][external-dns] +* Multiple Clusters (TODO) +* [Cinder Registry][cinder-registry] +* [Bastion Node][bastion] -One can scale up the number of application nodes by executing the ansible playbook -`openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml`. -This process can be done even if there is currently no deployment available. -The `increment_by` variable is used to specify by how much the deployment should -be scaled up (if none exists, it serves as a target number of application nodes). -The path to `openshift-ansible` directory can be customised by the `openshift_ansible_dir` -variable. Its value must be an absolute path to `openshift-ansible` and it cannot -contain the '/' symbol at the end. 
-Usage: +[ansible]: https://www.ansible.com/ +[openshift-ansible]: https://github.com/openshift/openshift-ansible +[devstack]: https://docs.openstack.org/devstack/ +[tripleo]: http://tripleo.org/ +[ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node +[contrib]: https://github.com/openshift/openshift-ansible-contrib +[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/ +[hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware +[origin]: https://www.openshift.org/ +[centos7]: https://www.centos.org/ +[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example +[advanced-configuration]: ./advanced-configuration.md +[accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster +[uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster +[loadbalancer]: ./advanced-configuration.md#multi-master-configuration +[external-dns]: ./advanced-configuration.md#dns-configuration-variables +[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry +[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node -``` -ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=] [-e openshift_ansible_dir=] -``` -Note: This playbook works only without a bastion node (`openstack_use_bastion: False`). ## License -As the rest of the openshift-ansible-contrib repository, the code here is -licensed under Apache 2. +Like the rest of the openshift-ansible-contrib repository, the code +here is licensed under Apache 2. diff --git a/playbooks/provisioning/openstack/advanced-configuration.md b/playbooks/provisioning/openstack/advanced-configuration.md new file mode 100644 index 000000000..af5ae9946 --- /dev/null +++ b/playbooks/provisioning/openstack/advanced-configuration.md @@ -0,0 +1,699 @@ +## Dependencies for localhost (ansible control/admin node) + +* [Ansible 2.3](https://pypi.python.org/pypi/ansible) +* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps) +* [jinja2](http://jinja.pocoo.org/docs/2.9/) +* [shade](https://pypi.python.org/pypi/shade) +* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath) +* python-dns / [dnspython](https://pypi.python.org/pypi/dnspython) +* Become (sudo) is not required. + +**NOTE**: You can use a Docker image with all dependencies set up. +Find more in the [Deployment section](#deployment). + +### Optional Dependencies for localhost +**Note**: When using rhel images, `rhel-7-server-openstack-10-rpms` repository is required in order to install these packages. + +* `python-openstackclient` +* `python-heatclient` + +## Dependencies for OpenStack hosted cluster nodes (servers) + +There are no additional dependencies for the cluster nodes. Required +configuration steps are done by Heat given a specific user data config +that normally should not be changed. 
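For the *Ansible host* itself, the localhost dependency list at the top of this section can also be satisfied straight from PyPI if the Docker image is not an option. A minimal sketch; the distribution packages mentioned above, such as `python-jmespath` and `python-dns`, are an equally valid route:

```bash
# Roughly matches the localhost dependency list above
$ pip install --user 'ansible==2.3.*' jinja2 shade jmespath dnspython
```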
+ +## Required galaxy modules + +In order to pull in external dependencies for DNS configuration steps, +the following commads need to be executed: + + ansible-galaxy install \ + -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \ + -p openshift-ansible-contrib/roles + +Alternatively you can install directly from github: + + ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \ + -p openshift-ansible-contrib/roles + +Notes: +* This assumes we're in the directory that contains the clonned +openshift-ansible-contrib repo in its root path. +* When trying to install a different version, the previous one must be removed first +(`infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)). +Otherwise, even if there are differences between the two versions, installation of the newer version is skipped. + + +## Accessing the OpenShift Cluster + +### Use the Cluster DNS + +In addition to the OpenShift nodes, we created a DNS server with all +the necessary entries. We will configure your *Ansible host* to use +this new DNS and talk to the deployed OpenShift. + +First, get the DNS IP address: + +```bash +$ openstack server show dns-0.openshift.example.com --format value --column addresses +openshift-ansible-openshift.example.com-net=192.168.99.11, 10.40.128.129 +``` + +Note the floating IP address (it's `10.40.128.129` in this case) -- if +you're not sure, try pinging them both -- it's the one that responds +to pings. + +Next, edit your `/etc/resolv.conf` as root and put `nameserver DNS_IP` as your +**first entry**. + +If your `/etc/resolv.conf` currently looks like this: + +``` +; generated by /usr/sbin/dhclient-script +search openstacklocal +nameserver 192.168.0.3 +nameserver 192.168.0.2 +``` + +Change it to this: + +``` +; generated by /usr/sbin/dhclient-script +search openstacklocal +nameserver 10.40.128.129 +nameserver 192.168.0.3 +nameserver 192.168.0.2 +``` + +### Get the `oc` Client + +**NOTE**: You can skip this section if you're using the Docker image +-- it already has the `oc` binary. + +You need to download the OpenShift command line client (called `oc`). +You can download and extract `openshift-origin-client-tools` from the +OpenShift release page: + +https://github.com/openshift/origin/releases/latest/ + +Or you can now copy it from the master node: + + $ ansible --private-key ~/.ssh/openshift -i inventory masters[0] -m fetch -a "src=/bin/oc dest=oc" + +Either way, find the `oc` binary and put it in your `PATH`. + + +### Logging in Using the Command Line + + +```bash +oc login --insecure-skip-tls-verify=true https://console.openshift.example.com:8443 -u user -p password +oc new-project test +oc new-app --template=cakephp-mysql-example +oc status -v +curl http://cakephp-mysql-example-test.apps.openshift.example.com +``` + +This will trigger an image build. You can run `oc logs -f +bc/cakephp-mysql-example` to follow its progress. 
+ +Wait until the build has finished and both pods are deployed and running: + +``` +$ oc status -v +In project test on server https://console.openshift.example.com:8443 + +http://cakephp-mysql-example-test.apps.openshift.example.com (svc/cakephp-mysql-example) + dc/cakephp-mysql-example deploys istag/cakephp-mysql-example:latest <- + bc/cakephp-mysql-example source builds https://github.com/openshift/cakephp-ex.git on openshift/php:7.0 + deployment #1 deployed about a minute ago - 1 pod + +svc/mysql - 172.30.144.36:3306 + dc/mysql deploys openshift/mysql:5.7 + deployment #1 deployed 3 minutes ago - 1 pod + +Info: + * pod/cakephp-mysql-example-1-build has no liveness probe to verify pods are still running. + try: oc set probe pod/cakephp-mysql-example-1-build --liveness ... +View details with 'oc describe /' or list everything with 'oc get all'. + +``` + +You can now look at the deployed app using its route: + +``` +$ curl http://cakephp-mysql-example-test.apps.openshift.example.com +``` + +Its `title` should say: "Welcome to OpenShift". + + +### Accessing the UI + +You can also access the OpenShift cluster with a web browser by going to: + +https://console.openshift.example.com:8443 + +Note that for this to work, the OpenShift nodes must be accessible +from your computer and it's DNS configuration must use the cruster's +DNS. + + +## Removing the OpenShift Cluster + +Everything in the cluster is contained within a Heat stack. To +completely remove the cluster and all the related OpenStack resources, +run this command: + +```bash +openstack stack delete --wait --yes openshift.example.com +``` + + +## DNS configuration variables + +Pay special attention to the values in the first paragraph -- these +will depend on your OpenStack environment. + +Note that the provsisioning playbooks update the original Neutron subnet +created with the Heat stack to point to the configured DNS servers. +So the provisioned cluster nodes will start using those natively as +default nameservers. Technically, this allows to deploy OpenShift clusters +without dnsmasq proxies. + +The `env_id` and `public_dns_domain` will form the cluster's DNS domain all +your servers will be under. With the default values, this will be +`openshift.example.com`. For workloads, the default subdomain is 'apps'. +That sudomain can be set as well by the `openshift_app_domain` variable in +the inventory. + +The `openstack__hostname` is a set of variables used for customising +hostnames of servers with a given role. When such a variable stays commented, +default hostname (usually the role name) is used. + +The `public_dns_nameservers` is a list of DNS servers accessible from all +the created Nova servers. These will be serving as your DNS forwarders for +external FQDNs that do not belong to the cluster's DNS domain and its subdomains. +If you're unsure what to put in here, you can try the google or opendns servers, +but note that some organizations may be blocking them. + +The `openshift_use_dnsmasq` controls either dnsmasq is deployed or not. +By default, dnsmasq is deployed and comes as the hosts' /etc/resolv.conf file +first nameserver entry that points to the local host instance of the dnsmasq +daemon that in turn proxies DNS requests to the authoritative DNS server. +When Network Manager is enabled for provisioned cluster nodes, which is +normally the case, you should not change the defaults and always deploy dnsmasq. 
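Before moving on to external name servers, it may help to see how the variables described so far fit together. The values below are purely illustrative (not the shipped defaults), chosen to match the `openshift.example.com` examples used throughout this guide:

```yaml
env_id: openshift
public_dns_domain: example.com
openshift_app_domain: apps          # default workload subdomain
public_dns_nameservers:
  - 192.168.0.3                     # upstream forwarders reachable from the nodes
  - 192.168.0.2
```

With values like these, cluster nodes end up under `openshift.example.com` (for example `master-0.openshift.example.com`) and application routes under `apps.openshift.example.com`.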
+ +`external_nsupdate_keys` describes an external authoritative DNS server(s) +processing dynamic records updates in the public and private cluster views: + + external_nsupdate_keys: + public: + key_secret: + key_algorithm: 'hmac-md5' + key_name: 'update-key' + server: + private: + key_secret: + key_algorithm: 'hmac-sha256' + server: + +Here, for the public view section, we specified another key algorithm and +optional `key_name`, which normally defaults to the cluster's DNS domain. +This just illustrates a compatibility mode with a DNS service deployed +by OpenShift on OSP10 reference architecture, and used in a mixed mode with +another external DNS server. + +Another example defines an external DNS server for the public view +additionally to the in-stack DNS server used for the private view only: + + external_nsupdate_keys: + public: + key_secret: + key_algorithm: 'hmac-sha256' + server: + +Here, updates matching the public view will be hitting the given public +server IP. While updates matching the private view will be sent to the +auto evaluated in-stack DNS server's **public** IP. + +Note, for the in-stack DNS server, private view updates may be sent only +via the public IP of the server. You can not send updates via the private +IP yet. This forces the in-stack private server to have a floating IP. +See also the [security notes](#security-notes) + +## Other configuration variables + +`openstack_ssh_key` is a Nova keypair - you can see your keypairs with +`openstack keypair list`. This guide assumes that its corresponding private +key is `~/.ssh/openshift`, stored on the ansible admin (control) node. + +`openstack_default_image_name` is the default name of the Glance image the +servers will use. You can see your images with `openstack image list`. +In order to set a different image for a role, uncomment the line with the +corresponding variable (e.g. `openstack_lb_image_name` for load balancer) and +set its value to another available image name. `openstack_default_image_name` +must stay defined as it is used as a default value for the rest of the roles. + +`openstack_default_flavor` is the default Nova flavor the servers will use. +You can see your flavors with `openstack flavor list`. +In order to set a different flavor for a role, uncomment the line with the +corresponding variable (e.g. `openstack_lb_flavor` for load balancer) and +set its value to another available flavor. `openstack_default_flavor` must +stay defined as it is used as a default value for the rest of the roles. + +`openstack_external_network_name` is the name of the Neutron network +providing external connectivity. It is often called `public`, +`external` or `ext-net`. You can see your networks with `openstack +network list`. + +`openstack_private_network_name` is the name of the private Neutron network +providing admin/control access for ansible. It can be merged with other +cluster networks, there are no special requirements for networking. + +The `openstack_num_masters`, `openstack_num_infra` and +`openstack_num_nodes` values specify the number of Master, Infra and +App nodes to create. + +The `openshift_cluster_node_labels` defines custom labels for your openshift +cluster node groups. It currently supports app and infra node groups. +The default value of this variable sets `region: primary` to app nodes and +`region: infra` to infra nodes. 

An example of setting a customised label:
```
openshift_cluster_node_labels:
  app:
    mylabel: myvalue
```

The `openstack_nodes_to_remove` variable allows you to specify the numerical
indexes of App nodes that should be removed, for example `['0', '2']`.

The `docker_volume_size` is the default Docker volume size the servers will use.
In order to set a different volume size for a role,
uncomment the line with the corresponding variable (e.g. `docker_master_volume_size`
for master) and change its value. `docker_volume_size` must stay defined as it is
used as a default value for some of the servers (master, infra, app node).
The rest of the roles (etcd, load balancer, dns) have their defaults hard-coded.

**Note**: If `ephemeral_volumes` is set to `true`, the `*_volume_size` variables
will be ignored and the deployment will not create any Cinder volumes.

The `openstack_flat_secgrp` variable controls Neutron security group creation for
Heat stacks. Set it to true if you experience issues with security group rule
quotas. It trades security for a smaller number of rules by sharing the same set
of firewall rules for master, node, etcd and infra nodes.

The `required_packages` variable provides a list of additional prerequisite
packages to be installed before deploying an OpenShift cluster. They are
ignored, though, if `manage_packages: False` is set.

The `openstack_inventory` variable controls whether a static inventory is created
after the cluster nodes are provisioned on the OpenStack cloud. Note that a fully
dynamic inventory is not yet supported, so the static inventory is created in any
case.

The `openstack_inventory_path` points to the directory that hosts the generated
static inventory. It should point to the copied example inventory directory;
otherwise it creates a new one for you.

## Multi-master configuration

Please refer to the official documentation for the
[multi-master setup](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#multiple-masters)
and define the corresponding [inventory
variables](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#configuring-cluster-variables)
in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node
under the ansible group named `ext_lb`:

    openshift_master_cluster_method: native
    openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}"
    openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}"

## Provider Network

Normally, the playbooks create a new Neutron network and subnet and attach
floating IP addresses to each node. If you have a provider network set up, this
is all unnecessary as you can just access servers that are placed in the
provider network directly.

To use a provider network, set its name in `openstack_provider_network_name` in
`inventory/group_vars/all.yml`.

If you set the provider network name, the `openstack_external_network_name` and
`openstack_private_network_name` fields will be ignored.

**NOTE**: this will not update the nodes' DNS, so running openshift-ansible
right after provisioning will fail (unless you're using an external DNS server
your provider network knows about). You must make sure your nodes are able to
resolve each other by name.

## Security notes

Configure the required `*_ingress_cidr` variables to restrict public access
to the provisioned servers to your laptop (a /32 notation should be used)
or your trusted network.
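
As a rough sketch (the addresses below are placeholders, and only the ingress
variables explicitly named in this guide are shown), the restrictions might be
expressed in `inventory/group_vars/all.yml` like this:

    # SSH access only from the Ansible admin (control) node:
    ssh_ingress_cidr: 203.0.113.10/32
    # Public access to the in-stack DNS server and the nodes'
    # ephemeral port range only from a trusted network:
    node_ingress_cidr: 203.0.113.0/24

Any other `*_ingress_cidr` variables in the sample inventory take the same
CIDR-notation values.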

The most important is the `node_ingress_cidr`, which restricts public access
to the deployed DNS server and the cluster nodes' ephemeral port range.

Note: the command ``curl https://api.ipify.org`` helps find the external
IP address of your box (the ansible admin node).

There is also the `manage_packages` variable (defaults to True), which you
may want to turn off in order to speed up the provisioning tasks. This may
be the case for development environments. When turned off, the servers will
be provisioned omitting the ``yum update`` command. This brings security
implications though, and is not recommended for production deployments.

### DNS servers security options

Aside from `node_ingress_cidr` restricting public access to the in-stack DNS
servers, the following (bind/named specific) DNS security options are
available:

    named_public_recursion: 'no'
    named_private_recursion: 'yes'

External DNS servers, which are not included in the 'dns' hosts group,
are not managed. It is up to you to configure them.

## Configure the OpenShift parameters

Finally, you need to update the DNS entry in
`inventory/group_vars/OSEv3.yml` (look at
`openshift_master_default_subdomain`).

In addition, this is the place where you can customise your OpenShift
installation, for example by configuring authentication.

The full list of options is available in this sample inventory:

https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example

Note that in order to deploy OpenShift Origin, you should update the following
variables in `inventory/group_vars/OSEv3.yml` and `all.yml`:

    deployment_type: origin
    openshift_deployment_type: "{{ deployment_type }}"


## Setting a custom entrypoint

In order to set a custom entrypoint, update `openshift_master_cluster_public_hostname`:

    openshift_master_cluster_public_hostname: api.openshift.example.com

Note that an empty hostname does not work, so if your domain is `openshift.example.com`,
you cannot set this value to simply `openshift.example.com`.

## Creating and using a Cinder volume for the OpenShift registry

You can optionally have the playbooks create a Cinder volume and set
it up as the OpenShift hosted registry.

To do that, you need to specify the desired Cinder volume name and size in
gigabytes in `inventory/group_vars/all.yml`:

    cinder_hosted_registry_name: cinder-registry
    cinder_hosted_registry_size_gb: 10

With this, the playbooks will create the volume and set up its
filesystem. If there is an existing volume of the same name, we will
use it but keep the existing data on it.

To use the volume for the registry, you must first configure it with
the OpenStack credentials by putting the following into `OSEv3.yml`:

    openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
    openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
    openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
    openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"

This will use the credentials from your shell environment. If you want
to enter them explicitly, you can. You can also use credentials
different from the provisioning ones (say for quota or access control
reasons).

**NOTE**: If you're testing this on [DevStack][devstack], you must
explicitly set your Keystone API version to v2 (e.g.
`OS_AUTH_URL=http://10.34.37.47/identity/v2.0`) instead of the default
value provided by `openrc`. You may also encounter the following issue
with Cinder:

https://github.com/kubernetes/kubernetes/issues/50461

You can read the [OpenShift documentation on configuring
OpenStack][openstack] for more information.

[devstack]: https://docs.openstack.org/devstack/latest/
[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html


Next, we need to instruct OpenShift to use the Cinder volume for its
registry. Again in `OSEv3.yml`:

    #openshift_hosted_registry_storage_kind: openstack
    #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
    #openshift_hosted_registry_storage_openstack_filesystem: xfs

The filesystem value here will be used in the initial formatting of
the volume.

If you're using the dynamic inventory, you must uncomment these two values as
well:

    #openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}"
    #openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi"

But note that they use the `os_cinder` lookup plugin we provide, so you must
tell Ansible where to find it, either in `ansible.cfg` (the one we provide is
configured properly) or by exporting the
`ANSIBLE_LOOKUP_PLUGINS=openshift-ansible-contrib/lookup_plugins` environment
variable.



## Use an existing Cinder volume for the OpenShift registry

You can also use a pre-existing Cinder volume for the storage of your
OpenShift registry.

To do that, you need to have a Cinder volume. You can create one by
running:

    openstack volume create --size <volume size in GB> <volume name>

The volume needs to have a filesystem created before you put it to
use.

As with the automatically-created volume, you have to set up the
OpenStack credentials in `inventory/group_vars/OSEv3.yml` as well as the
registry values:

    #openshift_hosted_registry_storage_kind: openstack
    #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
    #openshift_hosted_registry_storage_openstack_filesystem: xfs
    #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05
    #openshift_hosted_registry_storage_volume_size: 10Gi

Note the `openshift_hosted_registry_storage_openstack_volumeID` and
`openshift_hosted_registry_storage_volume_size` values: these need to
be added in addition to the previous variables.

The **Cinder volume ID**, **filesystem** and **volume size** variables
must correspond to the values in your volume. The volume ID must be
the **UUID** of the Cinder volume, *not its name*.

We can format the volume for you if you ask for it in
`inventory/group_vars/all.yml`:

    prepare_and_format_registry_volume: true

**NOTE:** doing so **will destroy any data that's currently on the volume**!
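
Putting the pieces together, an `OSEv3.yml` registry section for a pre-existing
volume might end up looking like the sketch below once the comments are removed.
The volume UUID is just the illustrative value from above -- substitute the UUID
reported by `openstack volume show` for your own volume:

    openshift_hosted_registry_storage_kind: openstack
    openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
    openshift_hosted_registry_storage_openstack_filesystem: xfs
    openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05
    openshift_hosted_registry_storage_volume_size: 10Gi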

You can also run the registry setup playbook directly:

    ansible-playbook -i inventory playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml

(the provisioning phase must be completed first)



## Configure static inventory and access via a bastion node

Example inventory variables:

    openstack_use_bastion: true
    bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24"
    openstack_private_ssh_key: ~/.ssh/openshift
    openstack_inventory: static
    openstack_inventory_path: ../../../../inventory
    openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com

The `openstack_subnet_prefix` is the prefix of the OpenStack private network for
your cluster. The `bastion_ingress_cidr` defines the accepted range for SSH
connections to nodes, in addition to the `ssh_ingress_cidr` (see the security
notes above).

The SSH config will be stored on the ansible control node at the
given path. Ansible uses it automatically. To access the cluster nodes with
that ssh config, use the `-F` option, e.g.:

    ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK

Note, relative paths will not work for the `openstack_ssh_config_path`, but they
work for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this
guide, the latter points to the current directory, where you run ansible commands
from.

To verify node connectivity, use the command:

    ansible -v -i inventory/hosts -m ping all

If something is broken, double-check the inventory variables, the paths, the
generated `hosts` file (under `openstack_inventory_path`) and the
`openstack_ssh_config_path` file.

The `openstack_inventory: dynamic` setting can be used instead to access cluster
nodes directly via floating IPs. In this mode you cannot use a bastion node and
should specify the dynamic inventory file in your ansible commands, e.g.
`-i openstack.py`.

## Using Docker on the Ansible host

If you don't want to worry about the dependencies, you can use the
[OpenStack Control Host image][control-host-image].

[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/

It has all the dependencies installed, but you'll need to map your
code and credentials to it. Assuming your SSH keys live in `~/.ssh`
and everything else is in your current directory (i.e. `ansible.cfg`,
`keystonerc`, `inventory`, `openshift-ansible`,
`openshift-ansible-contrib`), this is how you run the deployment:

    sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \
        -v $PWD:/root/openshift:Z \
        -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \
        redhatcop/control-host-openstack bash

(feel free to replace `$PWD` with an actual path to your inventory and
checkouts, but note that relative paths don't work)

The first run may take a few minutes while the image is being
downloaded. After that, you'll be inside the container and you can run
the playbooks:

    cd openshift
    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml


### Run the playbook

Assuming your OpenStack (Keystone) credentials are in `keystonerc`,
this is how you start the provisioning process from your ansible control node:

    . keystonerc
    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml

Note, here you start with an empty inventory. The static inventory will be populated
with data so you can omit providing additional arguments for future ansible commands.

If the bastion is enabled, the generated SSH config must be applied for Ansible.
This is normally included automatically by the previous step. In order to execute
it as a separate playbook, use the following command:

    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml

The first infra node then becomes a bastion node as well and proxies access
for future ansible commands. The post-provision step also configures Satellite,
if requested, and the DNS server, and ensures that other OpenShift requirements
are met.

## Running Custom Post-Provision Actions

A custom playbook can be run like this:

```
ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml
```

If you'd like to limit the run to one particular host, you can do so as follows:

```
ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -l app-node-0.openshift.example.com
```

You can also create your own custom playbook. Here's one example that adds additional YUM repositories:

```
---
- hosts: app
  tasks:

  # enable EPEL
  - name: Add repository
    yum_repository:
      name: epel
      description: EPEL YUM repo
      baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
```

This example runs against app nodes. The list of host group options includes:

  - cluster_hosts (all hosts: app, infra, masters, dns, lb)
  - OSEv3 (app, infra, masters)
  - app
  - dns
  - masters
  - infra_hosts

Please consider contributing your custom playbook back to openshift-ansible-contrib!

A library of custom post-provision actions exists in `openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions`. Playbooks include:

### add-yum-repos.yml

[add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml) adds a list of custom yum repositories to every node in the cluster.

## Install OpenShift

Once provisioning succeeds, you can install OpenShift by running:

    ansible-playbook openshift-ansible/playbooks/byo/config.yml

## Access UI

The OpenShift UI may be accessed via the first master node's FQDN on port 8443.

When using a bastion, you may want to create an SSH tunnel from your control node
to access the UI at `https://localhost:8443`, with this inventory variable:

    openshift_ui_ssh_tunnel: True

Note, this requires sudo rights on the ansible control node and an absolute path
for the `openstack_private_ssh_key`. You should also update the control node's
`/etc/hosts`:

    127.0.0.1 master-0.openshift.example.com

In order to access the UI, an ssh-tunnel service will be created and started on the
control node. Make sure to remove these changes and the service manually when they
are no longer needed.

## Scale Deployment up/down

### Scaling up

One can scale up the number of application nodes by executing the ansible playbook
`openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml`.
This process can be done even if there is currently no deployment available.
The `increment_by` variable is used to specify by how much the deployment should
be scaled up (if none exists, it serves as a target number of application nodes).
The path to the `openshift-ansible` directory can be customised by the `openshift_ansible_dir`
variable.
Its value must be an absolute path to `openshift-ansible` and it cannot +contain the '/' symbol at the end. + +Usage: + +``` +ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=] [-e openshift_ansible_dir=] +``` + +Note: This playbook works only without a bastion node (`openstack_use_bastion: False`). diff --git a/playbooks/provisioning/openstack/ansible.cfg b/playbooks/provisioning/openstack/ansible.cfg new file mode 100644 index 000000000..a21f023ea --- /dev/null +++ b/playbooks/provisioning/openstack/ansible.cfg @@ -0,0 +1,24 @@ +# config file for ansible -- http://ansible.com/ +# ============================================== +[defaults] +ansible_user = openshift +forks = 50 +# work around privilege escalation timeouts in ansible +timeout = 30 +host_key_checking = false +inventory = inventory +inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt +gathering = smart +retry_files_enabled = false +fact_caching = jsonfile +fact_caching_connection = .ansible/cached_facts +fact_caching_timeout = 900 +stdout_callback = skippy +callback_whitelist = profile_tasks +lookup_plugins = openshift-ansible-contrib/lookup_plugins + + +[ssh_connection] +ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no +control_path = /var/tmp/%%h-%%r +pipelining = True diff --git a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg b/playbooks/provisioning/openstack/sample-inventory/ansible.cfg deleted file mode 100644 index a21f023ea..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/ansible.cfg +++ /dev/null @@ -1,24 +0,0 @@ -# config file for ansible -- http://ansible.com/ -# ============================================== -[defaults] -ansible_user = openshift -forks = 50 -# work around privilege escalation timeouts in ansible -timeout = 30 -host_key_checking = false -inventory = inventory -inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt -gathering = smart -retry_files_enabled = false -fact_caching = jsonfile -fact_caching_connection = .ansible/cached_facts -fact_caching_timeout = 900 -stdout_callback = skippy -callback_whitelist = profile_tasks -lookup_plugins = openshift-ansible-contrib/lookup_plugins - - -[ssh_connection] -ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no -control_path = /var/tmp/%%h-%%r -pipelining = True diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 2e897102e..970a07815 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -5,8 +5,8 @@ openshift_deployment_type: origin openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" openshift_master_cluster_method: native -openshift_master_cluster_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" -openshift_master_cluster_public_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" +openshift_master_cluster_public_hostname: "console.{{ env_id }}.{{ public_dns_domain }}" +openshift_master_cluster_hostname: "{{ openshift_master_cluster_public_hostname }}" osm_default_node_selector: 'region=primary' -- cgit v1.2.3 From 79b5ef66d15b19a232dbf92e246713cf18f3cc8c Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Thu, 12 Oct 2017 18:09:39 +0200 Subject: Attach additional RHN Pools (post-provision custom action) (#753) * README, add-rhn-pools.yml: 
Add new custom post-provision playbook that attaches additional RHN pools - also mention this example in the contrib README * added become true * README update --- playbooks/provisioning/openstack/README.md | 28 ++++++++++++++++++---- .../openstack/custom-actions/add-rhn-pools.yml | 13 ++++++++++ 2 files changed, 37 insertions(+), 4 deletions(-) create mode 100644 playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index b96c9c9db..fe87f68f4 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -524,7 +524,9 @@ If you'd like to limit the run to one particular host, you can do so as follows: ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -l app-node-0.openshift.example.com ``` -You can also create your own custom playbook. Here's one example that adds additional YUM repositories: +You can also create your own custom playbook. Here are a few examples: + +#### Adding additional YUM repositories ``` --- @@ -548,13 +550,31 @@ This example runs against app nodes. The list of options include: - masters - infra_hosts +#### Attaching additional RHN pools + +``` +--- +- hosts: cluster_hosts + tasks: + - name: Attach additional RHN pool + become: true + command: "/usr/bin/subscription-manager attach --pool=" + register: attach_rhn_pool_result + until: attach_rhn_pool_result.rc == 0 + retries: 10 + delay: 1 +``` + +This playbook runs against all cluster nodes. In order to help prevent slow connectivity +problems, the task is retried 10 times in case of initial failure. +Note that in order for this example to work in your deployment, your servers must use the RHEL image. + Please consider contributing your custom playbook back to openshift-ansible-contrib! A library of custom post-provision actions exists in `openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions`. Playbooks include: -##### add-yum-repos.yml - -[add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml) adds a list of custom yum repositories to every node in the cluster. 
+* [add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml): adds a list of custom yum repositories to every node in the cluster +* [add-rhn-pools.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml): attaches a list of additional RHN pools to every node in the cluster ### Install OpenShift diff --git a/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml b/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml new file mode 100644 index 000000000..d17c1e335 --- /dev/null +++ b/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml @@ -0,0 +1,13 @@ +--- +- hosts: cluster_hosts + vars: + rhn_pools: [] + tasks: + - name: Attach additional RHN pools + become: true + with_items: "{{ rhn_pools }}" + command: "/usr/bin/subscription-manager attach --pool={{ item }}" + register: attach_rhn_pools_result + until: attach_rhn_pools_result.rc == 0 + retries: 10 + delay: 1 -- cgit v1.2.3 -- cgit v1.2.3 From b450ff75888f7801094ca88957a237f33f5e85f1 Mon Sep 17 00:00:00 2001 From: tzumainn Date: Fri, 13 Oct 2017 05:21:26 -0400 Subject: Allow the specification of server group policies when provisioning openstack (#747) * Allow for the specifying of server policies during OpenStack provisioning * documentation for openstack server group policies * add doc link detailing allowed policies * changed default to anti-affinity --- playbooks/provisioning/openstack/README.md | 15 +++++++++++++++ .../openstack/sample-inventory/group_vars/all.yml | 5 +++++ playbooks/provisioning/openstack/stack_params.yaml | 2 ++ roles/openstack-stack/templates/heat_stack.yaml.j2 | 22 ++++++++++++++++++++++ .../templates/heat_stack_server.yaml.j2 | 6 ++++++ .../templates/heat_stack_server_nofloating.yaml.j2 | 6 ++++++ 6 files changed, 56 insertions(+) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index fe87f68f4..370f582b2 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -308,6 +308,21 @@ In order to set a custom entrypoint, update `openshift_master_cluster_public_hos Note than an empty hostname does not work, so if your domain is `openshift.example.com`, you cannot set this value to simply `openshift.example.com`. + +### Specifying server group policies + +You can specify server group policies for infra and master nodes using the following +parameters in `inventory/group_vars/all.yml`: + + ## Specify server group policies for master and infra nodes. Nova must be configured to + ## enable these policies. 'anti-affinity' will ensure that each VM is launched on a + ## different physical host. + #openstack_master_server_group_policies: [anti-affinity] + #openstack_infra_server_group_policies: [anti-affinity] + +The [Heat template documentation](https://docs.openstack.org/heat/pike/template_guide/openstack.html#OS::Nova::ServerGroup) +lists allowed policy values. 
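
For instance, a minimal sketch with the policies switched on for both groups
(assuming the underlying Nova deployment is configured to enforce them) could
look like this in `inventory/group_vars/all.yml`:

    openstack_master_server_group_policies: [anti-affinity]
    openstack_infra_server_group_policies: [anti-affinity]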
+ ### Creating and using a Cinder volume for the OpenShift registry You can optionally have the playbooks create a Cinder volume and set diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index 12f64f401..fa1fb6c64 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -62,6 +62,11 @@ openstack_default_flavor: "m1.medium" #docker_lb_volume_size: "5" docker_volume_size: "15" +## Specify server group policies for master and infra nodes. Nova must be configured to +## enable these policies. 'anti-affinity' will ensure that each VM is launched on a +## different physical host. +#openstack_master_server_group_policies: [anti-affinity] +#openstack_infra_server_group_policies: [anti-affinity] ## Create a Cinder volume and use it for the OpenShift registry. ## NOTE: the openstack credentials and hosted registry options must be set in OSEv3.yml! diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml index 484c06889..a4da31bfe 100644 --- a/playbooks/provisioning/openstack/stack_params.yaml +++ b/playbooks/provisioning/openstack/stack_params.yaml @@ -36,6 +36,8 @@ num_masters: "{{ openstack_num_masters }}" num_nodes: "{{ openstack_num_nodes }}" num_infra: "{{ openstack_num_infra }}" num_dns: "{{ openstack_num_dns | default(1) }}" +master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}" +infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index ef46211a4..a6b088efb 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -536,6 +536,20 @@ resources: - interface {% endif %} +{% if master_server_group_policies|length > 0 %} + master_server_group: + type: OS::Nova::ServerGroup + properties: + name: master_server_group + policies: {{ master_server_group_policies }} +{% endif %} +{% if infra_server_group_policies|length > 0 %} + infra_server_group: + type: OS::Nova::ServerGroup + properties: + name: infra_server_group + policies: {{ infra_server_group_policies }} +{% endif %} {% if num_masters|int > 1 %} loadbalancer: type: OS::Heat::ResourceGroup @@ -643,6 +657,10 @@ resources: floating_network: {{ external_network }} {% endif %} volume_size: {{ master_volume_size }} +{% if master_server_group_policies|length > 0 %} + scheduler_hints: + group: { get_resource: master_server_group } +{% endif %} {% if not provider_network %} depends_on: - interface @@ -766,6 +784,10 @@ resources: floating_network: {{ external_network }} {% endif %} volume_size: {{ infra_volume_size }} +{% if infra_server_group_policies|length > 0 %} + scheduler_hints: + group: { get_resource: infra_server_group } +{% endif %} {% if not provider_network %} depends_on: - interface diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index fc797941e..66c2491a9 100644 --- 
a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -98,6 +98,11 @@ parameters: description: OpenShift Node Labels default: {"region": "default" } + scheduler_hints: + type: json + description: Server scheduler hints. + default: {} + outputs: name: @@ -154,6 +159,7 @@ resources: host-type: { get_param: type } sub-host-type: { get_param: subtype } node_labels: { get_param: node_labels } + scheduler_hints: { get_param: scheduler_hints } {% if use_trunk_ports|default(false)|bool %} trunk-port: diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 index 2c16ad778..4b79d5ab6 100644 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -89,6 +89,11 @@ parameters: description: OpenShift Node Labels default: {"region": "default" } + scheduler_hints: + type: json + description: Server scheduler hints. + default: {} + outputs: name: @@ -131,6 +136,7 @@ resources: host-type: { get_param: type } sub-host-type: { get_param: subtype } node_labels: { get_param: node_labels } + scheduler_hints: { get_param: scheduler_hints } {% if use_trunk_ports|default(false)|bool %} trunk-port: -- cgit v1.2.3 From 9a697aca1fd6a4e13bb67cb09f89527927b77b3e Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 13 Oct 2017 15:53:47 +0200 Subject: Make the private key examples consistent Just like in the README, the Advanced Configuration will now rely on the default `~/.ssh/id_rsa` key and mention Ansible's `--private-key` option when using a different file. --- .../provisioning/openstack/advanced-configuration.md | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/playbooks/provisioning/openstack/advanced-configuration.md b/playbooks/provisioning/openstack/advanced-configuration.md index af5ae9946..5f4be7238 100644 --- a/playbooks/provisioning/openstack/advanced-configuration.md +++ b/playbooks/provisioning/openstack/advanced-configuration.md @@ -99,7 +99,7 @@ https://github.com/openshift/origin/releases/latest/ Or you can now copy it from the master node: - $ ansible --private-key ~/.ssh/openshift -i inventory masters[0] -m fetch -a "src=/bin/oc dest=oc" + $ ansible -i inventory masters[0] -m fetch -a "src=/bin/oc dest=oc" Either way, find the `oc` binary and put it in your `PATH`. @@ -245,9 +245,11 @@ See also the [security notes](#security-notes) ## Other configuration variables -`openstack_ssh_key` is a Nova keypair - you can see your keypairs with -`openstack keypair list`. This guide assumes that its corresponding private -key is `~/.ssh/openshift`, stored on the ansible admin (control) node. +`openstack_ssh_public_key` is a Nova keypair - you can see your +keypairs with `openstack keypair list`. It must correspond to the +private SSH key Ansible will use to log into the created VMs. This is +`~/.ssh/id_rsa` by default, but you can use a different key by passing +`--private-key` to `ansible-playbook`. `openstack_default_image_name` is the default name of the Glance image the servers will use. You can see your images with `openstack image list`. 
@@ -525,7 +527,7 @@ Example inventory variables: openstack_use_bastion: true bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24" - openstack_private_ssh_key: ~/.ssh/openshift + openstack_private_ssh_key: ~/.ssh/id_rsa openstack_inventory: static openstack_inventory_path: ../../../../inventory openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com @@ -611,13 +613,13 @@ if requested, and DNS server, and ensures other OpenShift requirements to be met A custom playbook can be run like this: ``` -ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml +ansible-playbook -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml ``` If you'd like to limit the run to one particular host, you can do so as follows: ``` -ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -l app-node-0.openshift.example.com +ansible-playbook -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -l app-node-0.openshift.example.com ``` You can also create your own custom playbook. Here's one example that adds additional YUM repositories: -- cgit v1.2.3 From 428018cbe505101d6f034fa4a0aaf53fd8f2caf1 Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Fri, 13 Oct 2017 16:42:07 +0200 Subject: Add Extra Docker Registry URLs (custom post-provision action) (#794) * add-docker-registry: playbook that adds docker registries to docker config file (in progress) * indentation fix * docker registries: add check for variable type * another type conversion * docker registry: try another unified formatting * another attempt * type error fix * quotation attempt * docker registry: bug fixes * docker registry: fixed formatting * docker registry: if docker is not available, skip the whole playbook * README updated * README: typo * docker registries: suggested changes applied (in progress) * docker registries: README updated, redundant check removed * removed redundant become:true --- playbooks/provisioning/openstack/README.md | 19 +++++ .../custom-actions/add-docker-registry.yml | 90 ++++++++++++++++++++++ 2 files changed, 109 insertions(+) create mode 100644 playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 370f582b2..78d4ffe7c 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -584,12 +584,31 @@ This playbook runs against all cluster nodes. In order to help prevent slow conn problems, the task is retried 10 times in case of initial failure. Note that in order for this example to work in your deployment, your servers must use the RHEL image. +#### Adding extra Docker registry URLs + +This playbook is located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/tree/master/playbooks/provisioning/openstack/custom-actions) directory. + +It adds URLs passed as arguments to the docker configuration program. 
+Going into more detail, the configuration program (which is in the YAML format) is loaded into an ansible variable +([lines 27-30](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L27-L30)) +and in its structure, `registries` and `insecure_registries` sections are expanded with the newly added items +([lines 56-76](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L56-L76)). +The new content is then saved into the original file +([lines 78-82](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L78-L82)) +and docker is restarted. + +Example usage: +``` +ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml --extra-vars '{"registries": "reg1", "insecure_registries": ["ins_reg1","ins_reg2"]}' +``` + Please consider contributing your custom playbook back to openshift-ansible-contrib! A library of custom post-provision actions exists in `openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions`. Playbooks include: * [add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml): adds a list of custom yum repositories to every node in the cluster * [add-rhn-pools.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml): attaches a list of additional RHN pools to every node in the cluster +* [add-docker-registry.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml): adds a list of docker registries to the docker configuration on every node in the cluster ### Install OpenShift diff --git a/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml b/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml new file mode 100644 index 000000000..e118a71dc --- /dev/null +++ b/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml @@ -0,0 +1,90 @@ +--- +- hosts: OSEv3 + become: true + vars: + registries: [] + insecure_registries: [] + + tasks: + - name: Check if docker is even installed + command: docker + + - name: Install atomic-registries package + yum: + name: atomic-registries + state: latest + + - name: Get registry configuration file + register: file_result + stat: + path: /etc/containers/registries.conf + + - name: Check if it exists + assert: + that: 'file_result.stat.exists' + msg: "Configuration file does not exist." 
+ + - name: Load configuration file + shell: cat /etc/containers/registries.conf + register: file_content + + - name: Store file content into a variable + set_fact: + docker_conf: "{{ file_content.stdout | from_yaml }}" + + - name: Make sure that docker file content is a dictionary + when: '(docker_conf is string) and (not docker_conf)' + set_fact: + docker_conf: {} + + - name: Make sure that registries is a list + when: 'registries is string' + set_fact: + registries_list: [ "{{ registries }}" ] + + - name: Make sure that insecure_registries is a list + when: 'insecure_registries is string' + set_fact: + insecure_registries_list: [ "{{ insecure_registries }}" ] + + - name: Set default values if there are no registries defined + set_fact: + docker_conf_registries: "{{ [] if docker_conf['registries'] is not defined else docker_conf['registries'] }}" + docker_conf_insecure_registries: "{{ [] if docker_conf['insecure_registries'] is not defined else docker_conf['insecure_registries'] }}" + + - name: Add other registries + when: 'registries_list is not defined' + register: registries_merge_result + set_fact: + docker_conf: "{{ docker_conf | combine({'registries': (docker_conf_registries + registries) | unique}, recursive=True) }}" + + - name: Add other registries (if registries had to be converted) + when: 'registries_merge_result|skipped' + set_fact: + docker_conf: "{{ docker_conf | combine({'registries': (docker_conf_registries + registries_list) | unique}, recursive=True) }}" + + - name: Add insecure registries + when: 'insecure_registries_list is not defined' + register: insecure_registries_merge_result + set_fact: + docker_conf: "{{ docker_conf | combine({'insecure_registries': (docker_conf_insecure_registries + insecure_registries) | unique }, recursive=True) }}" + + - name: Add insecure registries (if insecure_registries had to be converted) + when: 'insecure_registries_merge_result|skipped' + set_fact: + docker_conf: "{{ docker_conf | combine({'insecure_registries': (docker_conf_insecure_registries + insecure_registries_list) | unique }, recursive=True) }}" + + - name: Load variable back to file + copy: + content: "{{ docker_conf | to_yaml }}" + dest: /etc/containers/registries.conf + + - name: Restart registries service + service: + name: registries + state: restarted + + - name: Restart docker + service: + name: docker + state: restarted -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 -- cgit v1.2.3 From ca88364175fe5177cecbb479a157d7329db05d8a Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 16 Oct 2017 15:42:42 +0200 Subject: Support separate data network for Flannel SDN (#757) * Support separate data network for Flannel SDN Document the use case for a separate flannel data network. Allow Nova servers for openshift cluster to be provisioned with that isolated data network created and connected to masters, computes and infra nodes. Do not configure dns nameservers and router for that network. Signed-off-by: Bogdan Dobrelya * Fix flannel use cases with provider network Provider network cannot be used with flannel SDN as the latter requires a separate isolated network, while the provider network is an externally managed single network. 
Signed-off-by: Bogdan Dobrelya * Drop unused data_net_name Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 9 +++- .../provisioning/openstack/net_vars_check.yaml | 14 ++++++ playbooks/provisioning/openstack/prerequisites.yml | 3 ++ .../sample-inventory/group_vars/OSEv3.yml | 4 ++ .../openstack/sample-inventory/group_vars/all.yml | 4 ++ roles/openstack-stack/templates/heat_stack.yaml.j2 | 31 ++++++++++++ .../templates/heat_stack_server.yaml.j2 | 57 ++++++++++++++++++++++ .../templates/heat_stack_server_nofloating.yaml.j2 | 55 +++++++++++++++++++++ 8 files changed, 176 insertions(+), 1 deletion(-) create mode 100644 playbooks/provisioning/openstack/net_vars_check.yaml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index 78d4ffe7c..b9a3b23de 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -250,6 +250,9 @@ right after provisioning will fail (unless you're using an external DNS server your provider network knows about). You must make sure your nodes are able to resolve each other by name. +**NOTE**: Flannel SDN requires a dedicated containers data network and cannot +work over a single provider network. + #### Security notes Configure required `*_ingress_cidr` variables to restrict public access @@ -267,6 +270,10 @@ be the case for development environments. When turned off, the servers will be provisioned omitting the ``yum update`` command. This brings security implications though, and is not recommended for production deployments. +Flannel network used for user applications and workloads data should be +isolated from other networks as it has Neutron ports security disabled. +Openshift master, compute and infra nodes will be connected to that network. + ##### DNS servers security options Aside from `node_ingress_cidr` restricting public access to in-stack DNS @@ -646,7 +653,7 @@ The `increment_by` variable is used to specify by how much the deployment should be scaled up (if none exists, it serves as a target number of application nodes). The path to `openshift-ansible` directory can be customised by the `openshift_ansible_dir` variable. Its value must be an absolute path to `openshift-ansible` and it cannot -contain the '/' symbol at the end. +contain the '/' symbol at the end. 
Usage: diff --git a/playbooks/provisioning/openstack/net_vars_check.yaml b/playbooks/provisioning/openstack/net_vars_check.yaml new file mode 100644 index 000000000..68afde415 --- /dev/null +++ b/playbooks/provisioning/openstack/net_vars_check.yaml @@ -0,0 +1,14 @@ +--- +- name: Check the provider network configuration + fail: + msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network" + when: + - openstack_provider_network_name is defined + - openstack_private_data_network_name is defined + +- name: Check the flannel network configuration + fail: + msg: "A dedicated containers data network is only supported with Flannel SDN" + when: + - openstack_private_data_network_name is defined + - not openshift_use_flannel|default(False)|bool diff --git a/playbooks/provisioning/openstack/prerequisites.yml b/playbooks/provisioning/openstack/prerequisites.yml index f2f720f8b..11a31411e 100644 --- a/playbooks/provisioning/openstack/prerequisites.yml +++ b/playbooks/provisioning/openstack/prerequisites.yml @@ -2,6 +2,9 @@ - hosts: localhost tasks: + # Sanity check of inventory variables + - include: net_vars_check.yaml + # Check ansible - name: Check Ansible version assert: diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 2e897102e..70e77662d 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -51,3 +51,7 @@ openshift_override_hostname_check: true # NOTE(shadower): Always switch to root on the OSEv3 nodes. # openshift-ansible requires an explicit `become`. ansible_become: true + +# # Flannel networking +#openshift_use_openshift_sdn: false +#openshift_use_flannel: true diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml index fa1fb6c64..83289307d 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml @@ -15,6 +15,10 @@ public_dns_nameservers: [] openstack_ssh_public_key: "openshift" openstack_external_network_name: "public" #openstack_private_network_name: "openshift-ansible-{{ stack_name }}-net" +# # A dedicated Neutron network name for containers data network +# # Configures the data network to be separated from openstack_private_network_name +# # NOTE: this is only supported with Flannel SDN yet +#openstack_private_data_network_name: "openshift-ansible-{{ stack_name }}-data-net" ## If you want to use a provider network, set its name here. 
## NOTE: the `openstack_external_network_name` and diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index a6b088efb..1f1e33cf2 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -113,6 +113,22 @@ resources: - {{ nameserver }} {% endfor %} +{% if openshift_use_flannel|default(False)|bool %} + data_net: + type: OS::Neutron::Net + properties: + name: openshift-ansible-{{ stack_name }}-data-net + port_security_enabled: false + + data_subnet: + type: OS::Neutron::Subnet + properties: + name: openshift-ansible-{{ stack_name }}-data-subnet + network: { get_resource: data_net } + cidr: {{ osm_cluster_network_cidr|default('10.128.0.0/14') }} + gateway_ip: null +{% endif %} + router: type: OS::Neutron::Router properties: @@ -641,6 +657,11 @@ resources: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} {% endif %} secgrp: {% if openstack_flat_secgrp|default(False)|bool %} @@ -713,6 +734,11 @@ resources: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} {% endif %} secgrp: - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } @@ -767,6 +793,11 @@ resources: template: openshift-ansible-cluster_id-net params: cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} {% endif %} secgrp: # TODO(bogdando) filter only required node rules into infra-secgrp diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index 66c2491a9..6552e0a0d 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -68,6 +68,28 @@ parameters: description: Subnet resource {% endif %} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: + type: boolean + default: false + label: Attach-data-net + description: A switch for data port connection + + data_net: + type: string + default: '' + label: Net ID + description: Net resource + +{% if not provider_network %} + data_subnet: + type: string + default: '' + label: Subnet ID + description: Subnet resource +{% endif %} +{% endif %} + secgrp: type: comma_delimited_list label: Security groups @@ -133,6 +155,11 @@ outputs: {% endif %} - addr +{% if openshift_use_flannel|default(False)|bool %} +conditions: + no_data_subnet: {not: { get_param: attach_data_net} } +{% endif %} + resources: server: @@ -143,10 +170,27 @@ resources: image: { get_param: image } flavor: { get_param: flavor } networks: +{% if openshift_use_flannel|default(False)|bool %} + if: + - no_data_subnet +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } +{% endif %} +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } + - port: { get_resource: 
data_port } +{% endif %} + +{% else %} {% if use_trunk_ports|default(false)|bool %} - port: { get_attr: [trunk-port, port_id] } {% else %} - port: { get_resource: port } +{% endif %} {% endif %} user_data: get_file: user-data @@ -179,6 +223,19 @@ resources: {% endif %} security_groups: { get_param: secgrp } +{% if openshift_use_flannel|default(False)|bool %} + data_port: + type: OS::Neutron::Port + condition: { not: no_data_subnet } + properties: + network: { get_param: data_net } + port_security_enabled: false +{% if not provider_network %} + fixed_ips: + - subnet: { get_param: data_subnet } +{% endif %} +{% endif %} + {% if not provider_network %} floating-ip: type: OS::Neutron::FloatingIP diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 index 4b79d5ab6..742d53649 100644 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 @@ -66,6 +66,26 @@ parameters: label: Subnet ID description: Subnet resource +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: + type: boolean + default: false + label: Attach-data-net + description: A switch for data port connection + + data_net: + type: string + default: '' + label: Net ID + description: Net resource + + data_subnet: + type: string + default: '' + label: Subnet ID + description: Subnet resource +{% endif %} + secgrp: type: comma_delimited_list label: Security groups @@ -110,6 +130,11 @@ outputs: - 0 - addr +{% if openshift_use_flannel|default(False)|bool %} +conditions: + no_data_subnet: {not: { get_param: attach_data_net} } +{% endif %} + resources: server_nofloating: @@ -120,10 +145,27 @@ resources: image: { get_param: image } flavor: { get_param: flavor } networks: +{% if openshift_use_flannel|default(False)|bool %} + if: + - no_data_subnet +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } +{% endif %} +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } + - port: { get_resource: data_port } +{% endif %} + +{% else %} {% if use_trunk_ports|default(false)|bool %} - port: { get_attr: [trunk-port, port_id] } {% else %} - port: { get_resource: port } +{% endif %} {% endif %} user_data: get_file: user-data @@ -154,6 +196,19 @@ resources: - subnet: { get_param: subnet } security_groups: { get_param: secgrp } +{% if openshift_use_flannel|default(False)|bool %} + data_port: + type: OS::Neutron::Port + condition: { not: no_data_subnet } + properties: + network: { get_param: data_net } + port_security_enabled: false +{% if not provider_network %} + fixed_ips: + - subnet: { get_param: data_subnet } +{% endif %} +{% endif %} + {% if not ephemeral_volumes|default(false)|bool %} cinder_volume: type: OS::Cinder::Volume -- cgit v1.2.3 -- cgit v1.2.3 From e89bd6b1cb32ad52f0109f80022e801943b51893 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 17 Oct 2017 10:12:59 +0200 Subject: [WIP] Merge server with nofloating server heat templates (#761) Merge server with nofloating server heat templates --- roles/openstack-stack/tasks/generate-templates.yml | 6 - roles/openstack-stack/templates/heat_stack.yaml.j2 | 42 ++-- .../templates/heat_stack_server.yaml.j2 | 12 +- .../templates/heat_stack_server_nofloating.yaml.j2 | 225 --------------------- 4 files changed, 35 
insertions(+), 250 deletions(-) delete mode 100644 roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 diff --git a/roles/openstack-stack/tasks/generate-templates.yml b/roles/openstack-stack/tasks/generate-templates.yml index 110da8444..0ff50a095 100644 --- a/roles/openstack-stack/tasks/generate-templates.yml +++ b/roles/openstack-stack/tasks/generate-templates.yml @@ -20,12 +20,6 @@ src: heat_stack_server.yaml.j2 dest: "{{ stack_template_pre.path }}/server.yaml" -- name: generate HOT server w/o floating IPs template from jinja2 template - template: - src: heat_stack_server_nofloating.yaml.j2 - dest: "{{ stack_template_pre.path }}/server_nofloating.yaml" - when: use_bastion|bool - - name: generate user_data from jinja2 template template: src: user_data.j2 diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index 1f1e33cf2..a69b7fc00 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -71,6 +71,9 @@ outputs: value: { get_attr: [ dns, private_ip ] } {% endif %} +conditions: + no_floating: {% if provider_network or use_bastion|bool %}true{% else %}false{% endif %} + resources: {% if not provider_network %} @@ -504,11 +507,7 @@ resources: properties: count: {{ num_etcd }} resource_def: -{% if use_bastion|bool %} - type: server_nofloating.yaml -{% else %} type: server.yaml -{% endif %} properties: name: str_replace: @@ -543,8 +542,13 @@ resources: secgrp: - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - { get_resource: common-secgrp } -{% if not use_bastion|bool and not provider_network %} - floating_network: {{ external_network }} + floating_network: + if: + - no_floating + - null + - {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false {% endif %} volume_size: {{ etcd_volume_size }} {% if not provider_network %} @@ -622,11 +626,7 @@ resources: properties: count: {{ num_masters }} resource_def: -{% if use_bastion|bool %} - type: server_nofloating.yaml -{% else %} type: server.yaml -{% endif %} properties: name: str_replace: @@ -674,8 +674,13 @@ resources: {% endif %} {% endif %} - { get_resource: common-secgrp } -{% if not use_bastion|bool and not provider_network %} - floating_network: {{ external_network }} + floating_network: + if: + - no_floating + - null + - {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false {% endif %} volume_size: {{ master_volume_size }} {% if master_server_group_policies|length > 0 %} @@ -694,11 +699,7 @@ resources: removal_policies: - resource_list: {{ nodes_to_remove }} resource_def: -{% if use_bastion|bool %} - type: server_nofloating.yaml -{% else %} type: server.yaml -{% endif %} properties: name: str_replace: @@ -743,8 +744,13 @@ resources: secgrp: - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - { get_resource: common-secgrp } -{% if not use_bastion|bool and not provider_network %} - floating_network: {{ external_network }} + floating_network: + if: + - no_floating + - null + - {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false {% endif %} volume_size: {{ node_volume_size }} {% if not provider_network %} diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 index 
6552e0a0d..9ffe721a5 100644 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 @@ -95,9 +95,17 @@ parameters: label: Security groups description: Security group resources + attach_float_net: + type: boolean + default: true + + label: Attach-float-net + description: A switch for floating network port connection + {% if not provider_network %} floating_network: type: string + default: '' label: Floating network description: Network to allocate floating IP from {% endif %} @@ -155,8 +163,9 @@ outputs: {% endif %} - addr -{% if openshift_use_flannel|default(False)|bool %} conditions: + no_floating: {not: { get_param: attach_float_net} } +{% if openshift_use_flannel|default(False)|bool %} no_data_subnet: {not: { get_param: attach_data_net} } {% endif %} @@ -238,6 +247,7 @@ resources: {% if not provider_network %} floating-ip: + condition: { not: no_floating } type: OS::Neutron::FloatingIP properties: floating_network: { get_param: floating_network } diff --git a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 deleted file mode 100644 index 742d53649..000000000 --- a/roles/openstack-stack/templates/heat_stack_server_nofloating.yaml.j2 +++ /dev/null @@ -1,225 +0,0 @@ -heat_template_version: 2016-10-14 - -description: OpenShift cluster server w/o floating IP - -parameters: - - name: - type: string - label: Name - description: Name - - group: - type: string - label: Host Group - description: The Primary Ansible Host Group - default: host - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - type: - type: string - label: Type - description: Type master or node - - subtype: - type: string - label: Sub-type - description: Sub-type compute or infra for nodes, default otherwise - default: default - - key_name: - type: string - label: Key name - description: Key name of keypair - - image: - type: string - label: Image - description: Name of the image - - flavor: - type: string - label: Flavor - description: Name of the flavor - - net: - type: string - label: Net ID - description: Net resource - - net_name: - type: string - label: Net name - description: Net name - - subnet: - type: string - label: Subnet ID - description: Subnet resource - -{% if openshift_use_flannel|default(False)|bool %} - attach_data_net: - type: boolean - default: false - label: Attach-data-net - description: A switch for data port connection - - data_net: - type: string - default: '' - label: Net ID - description: Net resource - - data_subnet: - type: string - default: '' - label: Subnet ID - description: Subnet resource -{% endif %} - - secgrp: - type: comma_delimited_list - label: Security groups - description: Security group resources - - availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - - volume_size: - type: number - description: Size of the volume to be created. - default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - node_labels: - type: json - description: OpenShift Node Labels - default: {"region": "default" } - - scheduler_hints: - type: json - description: Server scheduler hints. 
- default: {} - -outputs: - - name: - description: Name of the server - value: { get_attr: [ server_nofloating, name ] } - - private_ip: - description: Private IP of the server - value: - get_attr: - - server_nofloating - - addresses - - { get_param: net_name } - - 0 - - addr - -{% if openshift_use_flannel|default(False)|bool %} -conditions: - no_data_subnet: {not: { get_param: attach_data_net} } -{% endif %} - -resources: - - server_nofloating: - type: OS::Nova::Server - properties: - name: { get_param: name } - key_name: { get_param: key_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: -{% if openshift_use_flannel|default(False)|bool %} - if: - - no_data_subnet -{% if use_trunk_ports|default(false)|bool %} - - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - - port: { get_resource: port } -{% endif %} -{% if use_trunk_ports|default(false)|bool %} - - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - - port: { get_resource: port } - - port: { get_resource: data_port } -{% endif %} - -{% else %} -{% if use_trunk_ports|default(false)|bool %} - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - port: { get_resource: port } -{% endif %} -{% endif %} - user_data: - get_file: user-data - user_data_format: RAW - user_data_update_policy: IGNORE - metadata: - group: { get_param: group } - environment: { get_param: cluster_env } - clusterid: { get_param: cluster_id } - host-type: { get_param: type } - sub-host-type: { get_param: subtype } - node_labels: { get_param: node_labels } - scheduler_hints: { get_param: scheduler_hints } - -{% if use_trunk_ports|default(false)|bool %} - trunk-port: - type: OS::Neutron::Trunk - properties: - name: { get_param: name } - port: { get_resource: port } -{% endif %} - - port: - type: OS::Neutron::Port - properties: - network: { get_param: net } - fixed_ips: - - subnet: { get_param: subnet } - security_groups: { get_param: secgrp } - -{% if openshift_use_flannel|default(False)|bool %} - data_port: - type: OS::Neutron::Port - condition: { not: no_data_subnet } - properties: - network: { get_param: data_net } - port_security_enabled: false -{% if not provider_network %} - fixed_ips: - - subnet: { get_param: data_subnet } -{% endif %} -{% endif %} - -{% if not ephemeral_volumes|default(false)|bool %} - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { get_resource: server_nofloating } - mountpoint: /dev/sdb -{% endif %} -- cgit v1.2.3 -- cgit v1.2.3 From 8d14089a84119d4b824bfea991099941122a2c12 Mon Sep 17 00:00:00 2001 From: Chandler Wilkerson Date: Tue, 17 Oct 2017 07:53:18 -0500 Subject: Docker storage fix (#812) * Added task to stop docker before templating config * Rearranged storage roles in rhv install --- roles/docker-storage-setup/tasks/main.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml index 209062ca7..dbff85662 100644 --- a/roles/docker-storage-setup/tasks/main.yaml +++ b/roles/docker-storage-setup/tasks/main.yaml @@ -1,4 +1,7 @@ --- +- name: stop docker + service: name=docker state=stopped + - block: - name: create the docker-storage config file template: @@ -7,7 +10,6 @@ owner: root group: root mode: 0644 - when: - ansible_distribution_version | 
version_compare('7.4', '>=') - ansible_distribution == "RedHat" @@ -20,7 +22,6 @@ owner: root group: root mode: 0644 - when: - ansible_distribution_version | version_compare('7.4', '<') - ansible_distribution == "RedHat" -- cgit v1.2.3 -- cgit v1.2.3 From d2ff422b284f04b8a19ad4c6aa388ba397d915e1 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 18 Oct 2017 12:53:31 +0200 Subject: Add Flannel support (#814) * Add flannel support * Document Flannel SDN use case for a separate data network. * Add post install step for flannel SDN * Configure iptables rules as described for OCP 3.4 refarch https://access.redhat.com/documentation/en-us/reference_architectures/2017/html/deploying_red_hat_openshift_container_platform_3.4_on_red_hat_openstack_platform_10/emphasis_manual_deployment_emphasis#run_ansible_installer * Configure flannel interface options Signed-off-by: Bogdan Dobrelya * Use os_firewall from galaxy for required flannel rules For flannel SDN: * Add openshift-ansible as a galaxy dependency module. * Use openshift-ansible/roles/os_firewall to apply DNS rules for flanel SDN. * Apply the remaining advanced rules with direct iptables commands as os_firewall do not support advanced rules. * Persist only iptables rules w/o dynamic KUBe rules. Those are added runtime and need restoration after reboot or iptables restart. * Configure and enable the masked iptables service on the app nodes. Enable it to allow the in-memory rules to be persisted. Disable firewalld, which is the expected default behavior of the os_firewall module. Signed-off-by: Bogdan Dobrelya * Allow access from nodes to masters' port 2379 when using flannel Flannel requires to gather information from etcd to configure and assign the subnets in the nodes, therefore, allow access from nodes to port 2379/tcp to the master security group. Signed-off-by: Bogdan Dobrelya --- playbooks/provisioning/openstack/README.md | 20 ++++++++ .../openstack/galaxy-requirements.yaml | 4 ++ playbooks/provisioning/openstack/post-install.yml | 57 ++++++++++++++++++++++ .../openstack/post-provision-openstack.yml | 25 ++++++++++ .../sample-inventory/group_vars/OSEv3.yml | 2 + roles/openstack-stack/templates/heat_stack.yaml.j2 | 6 +++ 6 files changed, 114 insertions(+) create mode 100644 playbooks/provisioning/openstack/post-install.yml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index b9a3b23de..a277047e1 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -145,6 +145,26 @@ via the public IP of the server. You can not send updates via the private IP yet. This forces the in-stack private server to have a floating IP. See also the [security notes](#security-notes) +#### Flannel networking + +In order to configure the +[flannel networking](https://docs.openshift.com/container-platform/3.6/install_config/configuring_sdn.html#using-flannel), +uncomment and adjust the appropriate `inventory/group_vars/OSEv3.yml` group vars. +Note that the `osm_cluster_network_cidr` must not overlap with the default +Docker bridge subnet of 172.17.0.0/16. Or you should change the docker0 default +CIDR range otherwise. For example, by adding `--bip=192.168.2.1/24` to +`DOCKER_NETWORK_OPTIONS` located in `/etc/sysconfig/docker-network`. + +Also note that the flannel network will be provisioned on a separate isolated Neutron +subnet defined from `osm_cluster_network_cidr` and having ports security disabled. 
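As an illustration only, the uncommented flannel block in `inventory/group_vars/OSEv3.yml` might look like the sketch below; the variable names and values mirror the sample inventory shipped with this change, so adjust the CIDR and interface name to your environment:

```yaml
# inventory/group_vars/OSEv3.yml -- minimal flannel SDN settings (sketch)
openshift_use_openshift_sdn: false       # disable the default openshift-sdn plugin
openshift_use_flannel: true              # use flannel over the isolated data network
osm_cluster_network_cidr: 10.128.0.0/14  # must not overlap with docker0 (172.17.0.0/16)
flannel_interface: eth1                  # NIC attached to the data subnet on each node
```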
+Use the `openstack_private_data_network_name` variable to define the network +name for the heat stack resource. + +After the cluster deployment done, you should run an additional post installation +step for flannel and docker iptables configuration: + + ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-install.yml + #### Other configuration variables `openstack_ssh_key` is a Nova keypair - you can see your keypairs with diff --git a/playbooks/provisioning/openstack/galaxy-requirements.yaml b/playbooks/provisioning/openstack/galaxy-requirements.yaml index 93dd14ec2..1d745dcc3 100644 --- a/playbooks/provisioning/openstack/galaxy-requirements.yaml +++ b/playbooks/provisioning/openstack/galaxy-requirements.yaml @@ -4,3 +4,7 @@ # From 'infra-ansible' - src: https://github.com/redhat-cop/infra-ansible version: master + +# From 'openshift-ansible' +- src: https://github.com/openshift/openshift-ansible + version: master diff --git a/playbooks/provisioning/openstack/post-install.yml b/playbooks/provisioning/openstack/post-install.yml new file mode 100644 index 000000000..417813e2a --- /dev/null +++ b/playbooks/provisioning/openstack/post-install.yml @@ -0,0 +1,57 @@ +--- +- hosts: OSEv3 + gather_facts: False + become: True + tasks: + - name: Save iptables rules to a backup file + when: openshift_use_flannel|default(False)|bool + shell: iptables-save > /etc/sysconfig/iptables.orig-$(date +%Y%m%d%H%M%S) + +# Enable iptables service on app nodes to persist custom rules (flannel SDN) +# FIXME(bogdando) w/a https://bugzilla.redhat.com/show_bug.cgi?id=1490820 +- hosts: app + gather_facts: False + become: True + vars: + os_firewall_allow: + - service: dnsmasq tcp + port: 53/tcp + - service: dnsmasq udp + port: 53/udp + tasks: + - when: openshift_use_flannel|default(False)|bool + block: + - include_role: + name: openshift-ansible/roles/os_firewall + - include_role: + name: openshift-ansible/roles/lib_os_firewall + - name: set allow rules for dnsmasq + os_firewall_manage_iptables: + name: "{{ item.service }}" + action: add + protocol: "{{ item.port.split('/')[1] }}" + port: "{{ item.port.split('/')[0] }}" + with_items: "{{ os_firewall_allow }}" + +- hosts: OSEv3 + gather_facts: False + become: True + tasks: + - name: Apply post-install iptables hacks for Flannel SDN (the best effort) + when: openshift_use_flannel|default(False)|bool + block: + - name: set allow/masquerade rules for for flannel/docker + shell: >- + (iptables-save | grep -q custom-flannel-docker-1) || + iptables -A DOCKER -w + -p all -j ACCEPT + -m comment --comment "custom-flannel-docker-1"; + (iptables-save | grep -q custom-flannel-docker-2) || + iptables -t nat -A POSTROUTING -w + -o {{flannel_interface|default('eth1')}} + -m comment --comment "custom-flannel-docker-2" + -j MASQUERADE + + # NOTE(bogdando) the rules will not be restored, when iptables service unit is disabled & masked + - name: Persist in-memory iptables rules (w/o dynamic KUBE rules) + shell: iptables-save | grep -v KUBE > /etc/sysconfig/iptables diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml index a80e8d829..e460fbf12 100644 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ b/playbooks/provisioning/openstack/post-provision-openstack.yml @@ -76,6 +76,16 @@ hosts: OSEv3 gather_facts: true become: true + vars: + interface: "{{ flannel_interface|default('eth1') }}" + interface_file: /etc/sysconfig/network-scripts/ifcfg-{{ interface 
}} + interface_config: + DEVICE: "{{ interface }}" + TYPE: Ethernet + BOOTPROTO: dhcp + ONBOOT: 'yes' + DEFTROUTE: 'no' + PEERDNS: 'no' pre_tasks: - name: "Include DNS configuration to ensure proper name resolution" lineinfile: @@ -83,6 +93,21 @@ dest: /etc/sysconfig/network regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" + - name: "Configure the flannel interface options" + when: openshift_use_flannel|default(False)|bool + block: + - file: + dest: "{{ interface_file }}" + state: touch + mode: 0644 + owner: root + group: root + - lineinfile: + state: present + dest: "{{ interface_file }}" + regexp: "{{ item.key }}=" + line: "{{ item.key }}={{ item.value }}" + with_dict: "{{ interface_config }}" roles: - node-network-manager diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 70e77662d..949a323a7 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -53,5 +53,7 @@ openshift_override_hostname_check: true ansible_become: true # # Flannel networking +#osm_cluster_network_cidr: 10.128.0.0/14 #openshift_use_openshift_sdn: false #openshift_use_flannel: true +#flannel_interface: eth1 diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 index a69b7fc00..2359842a5 100644 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ b/roles/openstack-stack/templates/heat_stack.yaml.j2 @@ -341,6 +341,12 @@ resources: protocol: tcp port_range_min: 9090 port_range_max: 9090 +{% if openshift_use_flannel|default(False)|bool %} + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 +{% endif %} etcd-secgrp: type: OS::Neutron::SecurityGroup -- cgit v1.2.3 From 3823c72af11f77b9639176921b398fbab2ac04fd Mon Sep 17 00:00:00 2001 From: Tlacenka Date: Wed, 18 Oct 2017 12:55:58 +0200 Subject: Add Extra CAs (custom post-provision action) (#801) * add cas: playbook adding new CAs created * add CAs: README updated, bug fixes * README: improvements * README: minor fixes * README: removed code snippet * README: fix --- playbooks/provisioning/openstack/README.md | 11 +++++++++++ playbooks/provisioning/openstack/custom-actions/add-cas.yml | 13 +++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 playbooks/provisioning/openstack/custom-actions/add-cas.yml diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md index a277047e1..f11a9bd73 100644 --- a/playbooks/provisioning/openstack/README.md +++ b/playbooks/provisioning/openstack/README.md @@ -629,6 +629,16 @@ Example usage: ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml --extra-vars '{"registries": "reg1", "insecure_registries": ["ins_reg1","ins_reg2"]}' ``` +#### Adding extra CAs to the trust chain + +This playbook is also located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions) directory. +It copies passed CAs to the trust chain location and updates the trust chain on each selected host. 
+ +Example usage: +``` +ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-cas.yml --extra-vars '{"ca_files": [, ]}' +``` + Please consider contributing your custom playbook back to openshift-ansible-contrib! A library of custom post-provision actions exists in `openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions`. Playbooks include: @@ -636,6 +646,7 @@ A library of custom post-provision actions exists in `openshift-ansible-contrib/ * [add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml): adds a list of custom yum repositories to every node in the cluster * [add-rhn-pools.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml): attaches a list of additional RHN pools to every node in the cluster * [add-docker-registry.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml): adds a list of docker registries to the docker configuration on every node in the cluster +* [add-cas.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml): adds a list of CAs to the trust chain on every node in the cluster ### Install OpenShift diff --git a/playbooks/provisioning/openstack/custom-actions/add-cas.yml b/playbooks/provisioning/openstack/custom-actions/add-cas.yml new file mode 100644 index 000000000..b2c195f91 --- /dev/null +++ b/playbooks/provisioning/openstack/custom-actions/add-cas.yml @@ -0,0 +1,13 @@ +--- +- hosts: cluster_hosts + become: true + vars: + ca_files: [] + tasks: + - name: Copy CAs to the trusted CAs location + with_items: "{{ ca_files }}" + copy: + src: "{{ item }}" + dest: /etc/pki/ca-trust/source/anchors/ + - name: Update trusted CAs + shell: 'update-ca-trust enable && update-ca-trust extract' -- cgit v1.2.3 From 2e6426bfb83bfb92c44761227695c21170c93b1e Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 18 Oct 2017 14:26:21 +0200 Subject: Revert the console hostname change We'll do it in a separate pull request. --- playbooks/provisioning/openstack/advanced-configuration.md | 6 +++--- .../provisioning/openstack/sample-inventory/group_vars/OSEv3.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/playbooks/provisioning/openstack/advanced-configuration.md b/playbooks/provisioning/openstack/advanced-configuration.md index 17f0e2629..9e3fe75d2 100644 --- a/playbooks/provisioning/openstack/advanced-configuration.md +++ b/playbooks/provisioning/openstack/advanced-configuration.md @@ -108,7 +108,7 @@ Either way, find the `oc` binary and put it in your `PATH`. 
```bash -oc login --insecure-skip-tls-verify=true https://console.openshift.example.com:8443 -u user -p password +oc login --insecure-skip-tls-verify=true https://master-0.openshift.example.com:8443 -u user -p password oc new-project test oc new-app --template=cakephp-mysql-example oc status -v @@ -122,7 +122,7 @@ Wait until the build has finished and both pods are deployed and running: ``` $ oc status -v -In project test on server https://console.openshift.example.com:8443 +In project test on server https://master-0.openshift.example.com:8443 http://cakephp-mysql-example-test.apps.openshift.example.com (svc/cakephp-mysql-example) dc/cakephp-mysql-example deploys istag/cakephp-mysql-example:latest <- @@ -153,7 +153,7 @@ Its `title` should say: "Welcome to OpenShift". You can also access the OpenShift cluster with a web browser by going to: -https://console.openshift.example.com:8443 +https://master-0.openshift.example.com:8443 Note that for this to work, the OpenShift nodes must be accessible from your computer and it's DNS configuration must use the cruster's diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml index 4c1ca8c96..949a323a7 100644 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml @@ -5,8 +5,8 @@ openshift_deployment_type: origin openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" openshift_master_cluster_method: native -openshift_master_cluster_public_hostname: "console.{{ env_id }}.{{ public_dns_domain }}" -openshift_master_cluster_hostname: "{{ openshift_master_cluster_public_hostname }}" +openshift_master_cluster_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" +openshift_master_cluster_public_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" osm_default_node_selector: 'region=primary' -- cgit v1.2.3 From d20b0f90098fa40c0925d317fc4c5c30584ed861 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 18 Oct 2017 14:29:24 +0200 Subject: Remove bash highlight --- playbooks/provisioning/openstack/advanced-configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioning/openstack/advanced-configuration.md b/playbooks/provisioning/openstack/advanced-configuration.md index 9e3fe75d2..72bb95254 100644 --- a/playbooks/provisioning/openstack/advanced-configuration.md +++ b/playbooks/provisioning/openstack/advanced-configuration.md @@ -107,7 +107,7 @@ Either way, find the `oc` binary and put it in your `PATH`. ### Logging in Using the Command Line -```bash +``` oc login --insecure-skip-tls-verify=true https://master-0.openshift.example.com:8443 -u user -p password oc new-project test oc new-app --template=cakephp-mysql-example -- cgit v1.2.3 From 85181ea469ed5f541cbac6f73aefc134526aca8d Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Tue, 7 Nov 2017 14:34:03 +1100 Subject: Move the OpenStack playbooks We move them from `playbooks/provisioning/openstack` to `playbooks/openstack` to mirror `playbooks/aws`. 
--- playbooks/openstack/README.md | 258 +++++++ playbooks/openstack/advanced-configuration.md | 773 +++++++++++++++++++++ playbooks/openstack/ansible.cfg | 24 + playbooks/openstack/custom-actions/add-cas.yml | 13 + .../custom-actions/add-docker-registry.yml | 90 +++ .../openstack/custom-actions/add-rhn-pools.yml | 13 + .../openstack/custom-actions/add-yum-repos.yml | 12 + playbooks/openstack/galaxy-requirements.yaml | 10 + .../openshift-cluster/custom_flavor_check.yaml | 9 + .../openshift-cluster/custom_image_check.yaml | 9 + .../openshift-cluster/net_vars_check.yaml | 14 + .../openstack/openshift-cluster/post-install.yml | 57 ++ .../openshift-cluster/post-provision-openstack.yml | 118 ++++ .../openstack/openshift-cluster/pre-install.yml | 19 + .../openstack/openshift-cluster/pre_tasks.yml | 53 ++ .../prepare-and-format-cinder-volume.yaml | 67 ++ .../openstack/openshift-cluster/prerequisites.yml | 123 ++++ .../openshift-cluster/provision-openstack.yml | 35 + .../openstack/openshift-cluster/provision.yaml | 4 + playbooks/openstack/openshift-cluster/roles | 1 + .../openstack/openshift-cluster/scale-up.yaml | 75 ++ .../openstack/openshift-cluster/stack_params.yaml | 49 ++ .../sample-inventory/group_vars/OSEv3.yml | 59 ++ .../openstack/sample-inventory/group_vars/all.yml | 166 +++++ playbooks/openstack/sample-inventory/inventory.py | 88 +++ playbooks/provisioning/openstack/README.md | 258 ------- .../openstack/advanced-configuration.md | 773 --------------------- playbooks/provisioning/openstack/ansible.cfg | 24 - .../openstack/custom-actions/add-cas.yml | 13 - .../custom-actions/add-docker-registry.yml | 90 --- .../openstack/custom-actions/add-rhn-pools.yml | 13 - .../openstack/custom-actions/add-yum-repos.yml | 12 - .../openstack/custom_flavor_check.yaml | 9 - .../provisioning/openstack/custom_image_check.yaml | 9 - .../openstack/galaxy-requirements.yaml | 10 - .../provisioning/openstack/net_vars_check.yaml | 14 - playbooks/provisioning/openstack/post-install.yml | 57 -- .../openstack/post-provision-openstack.yml | 118 ---- playbooks/provisioning/openstack/pre-install.yml | 19 - playbooks/provisioning/openstack/pre_tasks.yml | 53 -- .../prepare-and-format-cinder-volume.yaml | 67 -- playbooks/provisioning/openstack/prerequisites.yml | 123 ---- .../provisioning/openstack/provision-openstack.yml | 35 - playbooks/provisioning/openstack/provision.yaml | 4 - playbooks/provisioning/openstack/roles | 1 - .../sample-inventory/group_vars/OSEv3.yml | 59 -- .../openstack/sample-inventory/group_vars/all.yml | 166 ----- .../openstack/sample-inventory/inventory.py | 88 --- playbooks/provisioning/openstack/scale-up.yaml | 75 -- playbooks/provisioning/openstack/stack_params.yaml | 49 -- 50 files changed, 2139 insertions(+), 2139 deletions(-) create mode 100644 playbooks/openstack/README.md create mode 100644 playbooks/openstack/advanced-configuration.md create mode 100644 playbooks/openstack/ansible.cfg create mode 100644 playbooks/openstack/custom-actions/add-cas.yml create mode 100644 playbooks/openstack/custom-actions/add-docker-registry.yml create mode 100644 playbooks/openstack/custom-actions/add-rhn-pools.yml create mode 100644 playbooks/openstack/custom-actions/add-yum-repos.yml create mode 100644 playbooks/openstack/galaxy-requirements.yaml create mode 100644 playbooks/openstack/openshift-cluster/custom_flavor_check.yaml create mode 100644 playbooks/openstack/openshift-cluster/custom_image_check.yaml create mode 100644 playbooks/openstack/openshift-cluster/net_vars_check.yaml create mode 100644 
playbooks/openstack/openshift-cluster/post-install.yml create mode 100644 playbooks/openstack/openshift-cluster/post-provision-openstack.yml create mode 100644 playbooks/openstack/openshift-cluster/pre-install.yml create mode 100644 playbooks/openstack/openshift-cluster/pre_tasks.yml create mode 100644 playbooks/openstack/openshift-cluster/prepare-and-format-cinder-volume.yaml create mode 100644 playbooks/openstack/openshift-cluster/prerequisites.yml create mode 100644 playbooks/openstack/openshift-cluster/provision-openstack.yml create mode 100644 playbooks/openstack/openshift-cluster/provision.yaml create mode 120000 playbooks/openstack/openshift-cluster/roles create mode 100644 playbooks/openstack/openshift-cluster/scale-up.yaml create mode 100644 playbooks/openstack/openshift-cluster/stack_params.yaml create mode 100644 playbooks/openstack/sample-inventory/group_vars/OSEv3.yml create mode 100644 playbooks/openstack/sample-inventory/group_vars/all.yml create mode 100755 playbooks/openstack/sample-inventory/inventory.py delete mode 100644 playbooks/provisioning/openstack/README.md delete mode 100644 playbooks/provisioning/openstack/advanced-configuration.md delete mode 100644 playbooks/provisioning/openstack/ansible.cfg delete mode 100644 playbooks/provisioning/openstack/custom-actions/add-cas.yml delete mode 100644 playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml delete mode 100644 playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml delete mode 100644 playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml delete mode 100644 playbooks/provisioning/openstack/custom_flavor_check.yaml delete mode 100644 playbooks/provisioning/openstack/custom_image_check.yaml delete mode 100644 playbooks/provisioning/openstack/galaxy-requirements.yaml delete mode 100644 playbooks/provisioning/openstack/net_vars_check.yaml delete mode 100644 playbooks/provisioning/openstack/post-install.yml delete mode 100644 playbooks/provisioning/openstack/post-provision-openstack.yml delete mode 100644 playbooks/provisioning/openstack/pre-install.yml delete mode 100644 playbooks/provisioning/openstack/pre_tasks.yml delete mode 100644 playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml delete mode 100644 playbooks/provisioning/openstack/prerequisites.yml delete mode 100644 playbooks/provisioning/openstack/provision-openstack.yml delete mode 100644 playbooks/provisioning/openstack/provision.yaml delete mode 120000 playbooks/provisioning/openstack/roles delete mode 100644 playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml delete mode 100644 playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml delete mode 100755 playbooks/provisioning/openstack/sample-inventory/inventory.py delete mode 100644 playbooks/provisioning/openstack/scale-up.yaml delete mode 100644 playbooks/provisioning/openstack/stack_params.yaml diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md new file mode 100644 index 000000000..a2f553f4c --- /dev/null +++ b/playbooks/openstack/README.md @@ -0,0 +1,258 @@ +# OpenStack Provisioning + +This directory contains [Ansible][ansible] playbooks and roles to create +OpenStack resources (servers, networking, volumes, security groups, +etc.). The result is an environment ready for OpenShift installation +via [openshift-ansible]. + +We provide everything necessary to be able to install OpenShift on +OpenStack (including the DNS and load balancer servers when +necessary). 
In addition, we work on providing integration with the +OpenStack-native services (storage, lbaas, baremetal as a service, +dns, etc.). + + +## OpenStack Requirements + +Before you start the installation, you need to have an OpenStack +environment to connect to. You can use a public cloud or an OpenStack +within your organisation. It is also possible to +use [Devstack][devstack] or [TripleO][tripleo]. In the case of +TripleO, we will be running on top of the **overcloud**. + +The OpenStack release must be Newton (for Red Hat OpenStack this is +version 10) or newer. It must also satisfy these requirements: + +* Heat (Orchestration) must be available +* The deployment image (CentOS 7 or RHEL 7) must be loaded +* The deployment flavor must be available to your user + - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing + - look at + the [Minimum Hardware Requirements page][hardware-requirements] + for production +* The keypair for SSH must be available in openstack +* `keystonerc` file that lets you talk to the openstack services + * NOTE: only Keystone V2 is currently supported + +Optional: +* External Neutron network with a floating IP address pool + + +## Installation + +There are four main parts to the installation: + +1. [Preparing Ansible and dependencies](#1-preparing-ansible-and-dependencies) +2. [Configuring the desired OpenStack environment and OpenShift cluster](#2-configuring-the-openstack-environment-and-openshift-cluster) +3. [Creating the OpenStack resources (VMs, networking, etc.)](#3-creating-the-openstack-resources-vms-networking-etc) +4. [Installing OpenShift](#4-installing-openshift) + +This guide is going to install [OpenShift Origin][origin] +with [CentOS 7][centos7] images with minimal customisation. + +We will create the VMs for running OpenShift, in a new Neutron +network, assign Floating IP addresses and configure DNS. + +The OpenShift cluster will have a single Master node that will run +`etcd`, a single Infra node and two App nodes. + +You can look at +the [Advanced Configuration page][advanced-configuration] for +additional options. + + + +### 1. Preparing Ansible and dependencies + +First, you need to select where to run [Ansible][ansible] from (the +*Ansible host*). This can be the computer you read this guide on or an +OpenStack VM you'll create specifically for this purpose. + +We will use +a +[Docker image that has all the dependencies installed][control-host-image] to +make things easier. If you don't want to use Docker, take a look at +the [Ansible host dependencies][ansible-dependencies] and make sure +they're installed. + +Your *Ansible host* needs to have the following: + +1. Docker +2. `keystonerc` file with your OpenStack credentials +3. SSH private key for logging in to your OpenShift nodes + +Assuming your private key is `~/.ssh/id_rsa` and `keystonerc` in your +current directory: + +```bash +$ sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \ + -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \ + redhatcop/control-host-openstack bash +``` + +This will create the container, add your SSH key and source your +`keystonerc`. It should be set up for the installation. + +You can verify that everything is in order: + + +```bash +$ less .ssh/id_rsa +$ ansible --version +$ openstack image list +``` + + +### 2. Configuring the OpenStack Environment and OpenShift Cluster + +The configuration is all done in an Ansible inventory directory. 
We +will clone the [openshift-ansible-contrib][contrib] repository and set +things up for a minimal installation. + + +``` +$ git clone https://github.com/openshift/openshift-ansible-contrib +$ cp -r openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/ inventory +``` + +If you're testing multiple configurations, you can have multiple +inventories and switch between them. + +#### OpenStack Configuration + +The OpenStack configuration is in `inventory/group_vars/all.yml`. + +Open the file and plug in the image, flavor and network configuration +corresponding to your OpenStack installation. + +```bash +$ vi inventory/group_vars/all.yml +``` + +1. Set the `openstack_ssh_public_key` to your OpenStack keypair name. + - See `openstack keypair list` to find the keypairs registered with + OpenShift. + - This must correspond to your private SSH key in `~/.ssh/id_rsa` +2. Set the `openstack_external_network_name` to the floating IP + network of your openstack. + - See `openstack network list` for the list of networks. + - It's often called `public`, `external` or `ext-net`. +3. Set the `openstack_default_image_name` to the image you want your + OpenShift VMs to run. + - See `openstack image list` for the list of available images. +4. Set the `openstack_default_flavor` to the flavor you want your + OpenShift VMs to use. + - See `openstack flavor list` for the list of available flavors. + +**NOTE**: In most OpenStack environments, you will also need to +configure the forwarders for the DNS server we create. This depends on +your environment. + +Launch a VM in your OpenStack and look at its `/etc/resolv.conf` and +put the IP addresses into `public_dns_nameservers` in +`inventory/group_vars/all.yml`. + + +#### OpenShift configuration + +The OpenShift configuration is in `inventory/group_vars/OSEv3.yml`. + +The default options will mostly work, but unless you used the large +flavors for a production-ready environment, openshift-ansible's +hardware check will fail. + +Let's disable those checks by putting this in +`inventory/group_vars/OSEv3.yml`: + +```yaml +openshift_disable_check: disk_availability,memory_availability +``` + +**NOTE**: The default authentication method will allow **any username +and password** in! If you're running this in a public place, you need +to set up access control. + +Feel free to look at +the [Sample OpenShift Inventory][sample-openshift-inventory] and +the [advanced configuration][advanced-configuration]. + + +### 3. Creating the OpenStack resources (VMs, networking, etc.) + +We will install the DNS server roles using ansible galaxy and then run +the openstack provisioning playbook. The `ansible.cfg` file we provide +has useful defaults -- copy it to the directory you're going to run +Ansible from. + +```bash +$ ansible-galaxy install -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml -p openshift-ansible-contrib/roles +$ cp openshift-ansible-contrib/playbooks/provisioning/openstack/ansible.cfg ansible.cfg +``` +(you will only need to do this once) + +Then run the provisioning playbook -- this will create the OpenStack +resources: + +```bash +$ ansible-playbook -i inventory openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml +``` + +If you're using multiple inventories, make sure you pass the path to +the right one to `-i`. + + +### 4. 
Installing OpenShift + +We will use the `openshift-ansible` project to install openshift on +top of the OpenStack nodes we have prepared: + +```bash +$ git clone https://github.com/openshift/openshift-ansible +$ ansible-playbook -i inventory openshift-ansible/playbooks/byo/config.yml +``` + + +### Next Steps + +And that's it! You should have a small but functional OpenShift +cluster now. + +Take a look at [how to access the cluster][accessing-openshift] +and [how to remove it][uninstall-openshift] as well as the more +advanced configuration: + +* [Accessing the OpenShift cluster][accessing-openshift] +* [Removing the OpenShift cluster][uninstall-openshift] +* Set Up Authentication (TODO) +* [Multiple Masters with a load balancer][loadbalancer] +* [External Dns][external-dns] +* Multiple Clusters (TODO) +* [Cinder Registry][cinder-registry] +* [Bastion Node][bastion] + + +[ansible]: https://www.ansible.com/ +[openshift-ansible]: https://github.com/openshift/openshift-ansible +[devstack]: https://docs.openstack.org/devstack/ +[tripleo]: http://tripleo.org/ +[ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node +[contrib]: https://github.com/openshift/openshift-ansible-contrib +[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/ +[hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware +[origin]: https://www.openshift.org/ +[centos7]: https://www.centos.org/ +[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example +[advanced-configuration]: ./advanced-configuration.md +[accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster +[uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster +[loadbalancer]: ./advanced-configuration.md#multi-master-configuration +[external-dns]: ./advanced-configuration.md#dns-configuration-variables +[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry +[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node + + + +## License + +Like the rest of the openshift-ansible-contrib repository, the code +here is licensed under Apache 2. diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md new file mode 100644 index 000000000..72bb95254 --- /dev/null +++ b/playbooks/openstack/advanced-configuration.md @@ -0,0 +1,773 @@ +## Dependencies for localhost (ansible control/admin node) + +* [Ansible 2.3](https://pypi.python.org/pypi/ansible) +* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps) +* [jinja2](http://jinja.pocoo.org/docs/2.9/) +* [shade](https://pypi.python.org/pypi/shade) +* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath) +* python-dns / [dnspython](https://pypi.python.org/pypi/dnspython) +* Become (sudo) is not required. + +**NOTE**: You can use a Docker image with all dependencies set up. +Find more in the [Deployment section](#deployment). + +### Optional Dependencies for localhost +**Note**: When using rhel images, `rhel-7-server-openstack-10-rpms` repository is required in order to install these packages. + +* `python-openstackclient` +* `python-heatclient` + +## Dependencies for OpenStack hosted cluster nodes (servers) + +There are no additional dependencies for the cluster nodes. 
Required +configuration steps are done by Heat given a specific user data config +that normally should not be changed. + +## Required galaxy modules + +In order to pull in external dependencies for DNS configuration steps, +the following commads need to be executed: + + ansible-galaxy install \ + -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \ + -p openshift-ansible-contrib/roles + +Alternatively you can install directly from github: + + ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \ + -p openshift-ansible-contrib/roles + +Notes: +* This assumes we're in the directory that contains the clonned +openshift-ansible-contrib repo in its root path. +* When trying to install a different version, the previous one must be removed first +(`infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)). +Otherwise, even if there are differences between the two versions, installation of the newer version is skipped. + + +## Accessing the OpenShift Cluster + +### Use the Cluster DNS + +In addition to the OpenShift nodes, we created a DNS server with all +the necessary entries. We will configure your *Ansible host* to use +this new DNS and talk to the deployed OpenShift. + +First, get the DNS IP address: + +```bash +$ openstack server show dns-0.openshift.example.com --format value --column addresses +openshift-ansible-openshift.example.com-net=192.168.99.11, 10.40.128.129 +``` + +Note the floating IP address (it's `10.40.128.129` in this case) -- if +you're not sure, try pinging them both -- it's the one that responds +to pings. + +Next, edit your `/etc/resolv.conf` as root and put `nameserver DNS_IP` as your +**first entry**. + +If your `/etc/resolv.conf` currently looks like this: + +``` +; generated by /usr/sbin/dhclient-script +search openstacklocal +nameserver 192.168.0.3 +nameserver 192.168.0.2 +``` + +Change it to this: + +``` +; generated by /usr/sbin/dhclient-script +search openstacklocal +nameserver 10.40.128.129 +nameserver 192.168.0.3 +nameserver 192.168.0.2 +``` + +### Get the `oc` Client + +**NOTE**: You can skip this section if you're using the Docker image +-- it already has the `oc` binary. + +You need to download the OpenShift command line client (called `oc`). +You can download and extract `openshift-origin-client-tools` from the +OpenShift release page: + +https://github.com/openshift/origin/releases/latest/ + +Or you can now copy it from the master node: + + $ ansible -i inventory masters[0] -m fetch -a "src=/bin/oc dest=oc" + +Either way, find the `oc` binary and put it in your `PATH`. + + +### Logging in Using the Command Line + + +``` +oc login --insecure-skip-tls-verify=true https://master-0.openshift.example.com:8443 -u user -p password +oc new-project test +oc new-app --template=cakephp-mysql-example +oc status -v +curl http://cakephp-mysql-example-test.apps.openshift.example.com +``` + +This will trigger an image build. You can run `oc logs -f +bc/cakephp-mysql-example` to follow its progress. 
+ +Wait until the build has finished and both pods are deployed and running: + +``` +$ oc status -v +In project test on server https://master-0.openshift.example.com:8443 + +http://cakephp-mysql-example-test.apps.openshift.example.com (svc/cakephp-mysql-example) + dc/cakephp-mysql-example deploys istag/cakephp-mysql-example:latest <- + bc/cakephp-mysql-example source builds https://github.com/openshift/cakephp-ex.git on openshift/php:7.0 + deployment #1 deployed about a minute ago - 1 pod + +svc/mysql - 172.30.144.36:3306 + dc/mysql deploys openshift/mysql:5.7 + deployment #1 deployed 3 minutes ago - 1 pod + +Info: + * pod/cakephp-mysql-example-1-build has no liveness probe to verify pods are still running. + try: oc set probe pod/cakephp-mysql-example-1-build --liveness ... +View details with 'oc describe /' or list everything with 'oc get all'. + +``` + +You can now look at the deployed app using its route: + +``` +$ curl http://cakephp-mysql-example-test.apps.openshift.example.com +``` + +Its `title` should say: "Welcome to OpenShift". + + +### Accessing the UI + +You can also access the OpenShift cluster with a web browser by going to: + +https://master-0.openshift.example.com:8443 + +Note that for this to work, the OpenShift nodes must be accessible +from your computer and it's DNS configuration must use the cruster's +DNS. + + +## Removing the OpenShift Cluster + +Everything in the cluster is contained within a Heat stack. To +completely remove the cluster and all the related OpenStack resources, +run this command: + +```bash +openstack stack delete --wait --yes openshift.example.com +``` + + +## DNS configuration variables + +Pay special attention to the values in the first paragraph -- these +will depend on your OpenStack environment. + +Note that the provsisioning playbooks update the original Neutron subnet +created with the Heat stack to point to the configured DNS servers. +So the provisioned cluster nodes will start using those natively as +default nameservers. Technically, this allows to deploy OpenShift clusters +without dnsmasq proxies. + +The `env_id` and `public_dns_domain` will form the cluster's DNS domain all +your servers will be under. With the default values, this will be +`openshift.example.com`. For workloads, the default subdomain is 'apps'. +That sudomain can be set as well by the `openshift_app_domain` variable in +the inventory. + +The `openstack__hostname` is a set of variables used for customising +hostnames of servers with a given role. When such a variable stays commented, +default hostname (usually the role name) is used. + +The `public_dns_nameservers` is a list of DNS servers accessible from all +the created Nova servers. These will be serving as your DNS forwarders for +external FQDNs that do not belong to the cluster's DNS domain and its subdomains. +If you're unsure what to put in here, you can try the google or opendns servers, +but note that some organizations may be blocking them. + +The `openshift_use_dnsmasq` controls either dnsmasq is deployed or not. +By default, dnsmasq is deployed and comes as the hosts' /etc/resolv.conf file +first nameserver entry that points to the local host instance of the dnsmasq +daemon that in turn proxies DNS requests to the authoritative DNS server. +When Network Manager is enabled for provisioned cluster nodes, which is +normally the case, you should not change the defaults and always deploy dnsmasq. 
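For orientation, the DNS-related variables described above might be set in `inventory/group_vars/all.yml` roughly as in the sketch below; the domain, forwarder addresses and commented defaults are illustrative placeholders rather than required values:

```yaml
# inventory/group_vars/all.yml -- DNS settings (illustrative sketch)
env_id: openshift                  # combined with public_dns_domain -> openshift.example.com
public_dns_domain: example.com
public_dns_nameservers:            # forwarders reachable from the created Nova servers
  - 8.8.8.8
  - 8.8.4.4
#openshift_app_domain: apps        # subdomain used for workloads (default shown)
#openshift_use_dnsmasq: true       # keep dnsmasq deployed when Network Manager is enabled
```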
+ +`external_nsupdate_keys` describes an external authoritative DNS server(s) +processing dynamic records updates in the public and private cluster views: + + external_nsupdate_keys: + public: + key_secret: + key_algorithm: 'hmac-md5' + key_name: 'update-key' + server: + private: + key_secret: + key_algorithm: 'hmac-sha256' + server: + +Here, for the public view section, we specified another key algorithm and +optional `key_name`, which normally defaults to the cluster's DNS domain. +This just illustrates a compatibility mode with a DNS service deployed +by OpenShift on OSP10 reference architecture, and used in a mixed mode with +another external DNS server. + +Another example defines an external DNS server for the public view +additionally to the in-stack DNS server used for the private view only: + + external_nsupdate_keys: + public: + key_secret: + key_algorithm: 'hmac-sha256' + server: + +Here, updates matching the public view will be hitting the given public +server IP. While updates matching the private view will be sent to the +auto evaluated in-stack DNS server's **public** IP. + +Note, for the in-stack DNS server, private view updates may be sent only +via the public IP of the server. You can not send updates via the private +IP yet. This forces the in-stack private server to have a floating IP. +See also the [security notes](#security-notes) + +## Flannel networking + +In order to configure the +[flannel networking](https://docs.openshift.com/container-platform/3.6/install_config/configuring_sdn.html#using-flannel), +uncomment and adjust the appropriate `inventory/group_vars/OSEv3.yml` group vars. +Note that the `osm_cluster_network_cidr` must not overlap with the default +Docker bridge subnet of 172.17.0.0/16. Or you should change the docker0 default +CIDR range otherwise. For example, by adding `--bip=192.168.2.1/24` to +`DOCKER_NETWORK_OPTIONS` located in `/etc/sysconfig/docker-network`. + +Also note that the flannel network will be provisioned on a separate isolated Neutron +subnet defined from `osm_cluster_network_cidr` and having ports security disabled. +Use the `openstack_private_data_network_name` variable to define the network +name for the heat stack resource. + +After the cluster deployment done, you should run an additional post installation +step for flannel and docker iptables configuration: + + ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-install.yml + +## Other configuration variables + +`openstack_ssh_public_key` is a Nova keypair - you can see your +keypairs with `openstack keypair list`. It must correspond to the +private SSH key Ansible will use to log into the created VMs. This is +`~/.ssh/id_rsa` by default, but you can use a different key by passing +`--private-key` to `ansible-playbook`. + +`openstack_default_image_name` is the default name of the Glance image the +servers will use. You can see your images with `openstack image list`. +In order to set a different image for a role, uncomment the line with the +corresponding variable (e.g. `openstack_lb_image_name` for load balancer) and +set its value to another available image name. `openstack_default_image_name` +must stay defined as it is used as a default value for the rest of the roles. + +`openstack_default_flavor` is the default Nova flavor the servers will use. +You can see your flavors with `openstack flavor list`. +In order to set a different flavor for a role, uncomment the line with the +corresponding variable (e.g. 
`openstack_lb_flavor` for load balancer) and +set its value to another available flavor. `openstack_default_flavor` must +stay defined as it is used as a default value for the rest of the roles. + +`openstack_external_network_name` is the name of the Neutron network +providing external connectivity. It is often called `public`, +`external` or `ext-net`. You can see your networks with `openstack +network list`. + +`openstack_private_network_name` is the name of the private Neutron network +providing admin/control access for ansible. It can be merged with other +cluster networks, there are no special requirements for networking. + +The `openstack_num_masters`, `openstack_num_infra` and +`openstack_num_nodes` values specify the number of Master, Infra and +App nodes to create. + +The `openshift_cluster_node_labels` defines custom labels for your openshift +cluster node groups. It currently supports app and infra node groups. +The default value of this variable sets `region: primary` to app nodes and +`region: infra` to infra nodes. +An example of setting a customised label: +``` +openshift_cluster_node_labels: + app: + mylabel: myvalue +``` + +The `openstack_nodes_to_remove` allows you to specify the numerical indexes +of App nodes that should be removed; for example, ['0', '2'], + +The `docker_volume_size` is the default Docker volume size the servers will use. +In order to set a different volume size for a role, +uncomment the line with the corresponding variable (e. g. `docker_master_volume_size` +for master) and change its value. `docker_volume_size` must stay defined as it is +used as a default value for some of the servers (master, infra, app node). +The rest of the roles (etcd, load balancer, dns) have their defaults hard-coded. + +**Note**: If the `ephemeral_volumes` is set to `true`, the `*_volume_size` variables +will be ignored and the deployment will not create any cinder volumes. + +The `openstack_flat_secgrp`, controls Neutron security groups creation for Heat +stacks. Set it to true, if you experience issues with sec group rules +quotas. It trades security for number of rules, by sharing the same set +of firewall rules for master, node, etcd and infra nodes. + +The `required_packages` variable also provides a list of the additional +prerequisite packages to be installed before to deploy an OpenShift cluster. +Those are ignored though, if the `manage_packages: False`. + +The `openstack_inventory` controls either a static inventory will be created after the +cluster nodes provisioned on OpenStack cloud. Note, the fully dynamic inventory +is yet to be supported, so the static inventory will be created anyway. + +The `openstack_inventory_path` points the directory to host the generated static inventory. +It should point to the copied example inventory directory, otherwise ti creates +a new one for you. + +## Multi-master configuration + +Please refer to the official documentation for the +[multi-master setup](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#multiple-masters) +and define the corresponding [inventory +variables](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#configuring-cluster-variables) +in `inventory/group_vars/OSEv3.yml`. 

There is also the `manage_packages` variable (defaults to True), which you
may want to turn off in order to speed up the provisioning tasks. This may
be the case for development environments. When turned off, the servers will
be provisioned without running the ``yum update`` command. This has security
implications, though, and is not recommended for production deployments.

### DNS servers security options

Aside from `node_ingress_cidr` restricting public access to the in-stack DNS
servers, the following (bind/named specific) DNS security
options are available:

    named_public_recursion: 'no'
    named_private_recursion: 'yes'

External DNS servers, which are not included in the 'dns' hosts group,
are not managed. It is up to you to configure them.

## Configure the OpenShift parameters

Finally, you need to update the DNS entry in
`inventory/group_vars/OSEv3.yml` (look at
`openshift_master_default_subdomain`).

In addition, this is the place where you can customise your OpenShift
installation, for example by specifying the authentication.

The full list of options is available in this sample inventory:

https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example

Note that in order to deploy OpenShift Origin, you should update the following
variables in `inventory/group_vars/OSEv3.yml` and `all.yml`:

    deployment_type: origin
    openshift_deployment_type: "{{ deployment_type }}"
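
As an illustration of customising the authentication, an htpasswd-based identity
provider could be configured in `inventory/group_vars/OSEv3.yml` along these
lines (this mirrors the htpasswd example from the sample openshift-ansible
inventory linked above; the file path is the conventional default and may need
adjusting for your environment):

    # assumes an htpasswd file managed on the masters at this conventional path
    openshift_master_identity_providers:
      - name: htpasswd_auth
        login: 'true'
        challenge: 'true'
        kind: HTPasswdPasswordIdentityProvider
        filename: /etc/origin/master/htpasswd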

## Setting a custom entrypoint

In order to set a custom entrypoint, update `openshift_master_cluster_public_hostname`:

    openshift_master_cluster_public_hostname: api.openshift.example.com

Note that an empty hostname does not work, so if your domain is `openshift.example.com`,
you cannot set this value to simply `openshift.example.com`.

## Creating and using a Cinder volume for the OpenShift registry

You can optionally have the playbooks create a Cinder volume and set
it up as the OpenShift hosted registry.

To do that, you need to specify the desired Cinder volume name and size in
Gigabytes in `inventory/group_vars/all.yml`:

    cinder_hosted_registry_name: cinder-registry
    cinder_hosted_registry_size_gb: 10

With this, the playbooks will create the volume and set up its
filesystem. If there is an existing volume of the same name, we will
use it but keep the existing data on it.

To use the volume for the registry, you must first configure it with
the OpenStack credentials by putting the following into `OSEv3.yml`:

    openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
    openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
    openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
    openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"

This will use the credentials from your shell environment. If you want
to enter them explicitly, you can. You can also use credentials
different from the provisioning ones (say for quota or access control
reasons).

**NOTE**: If you're testing this on [DevStack][devstack], you must
explicitly set your Keystone API version to v2 (e.g.
`OS_AUTH_URL=http://10.34.37.47/identity/v2.0`) instead of the default
value provided by `openrc`. You may also encounter the following issue
with Cinder:

https://github.com/kubernetes/kubernetes/issues/50461

You can read the [OpenShift documentation on configuring
OpenStack][openstack] for more information.

[devstack]: https://docs.openstack.org/devstack/latest/
[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html


Next, we need to instruct OpenShift to use the Cinder volume for its
registry. Again in `OSEv3.yml`:

    #openshift_hosted_registry_storage_kind: openstack
    #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
    #openshift_hosted_registry_storage_openstack_filesystem: xfs

The filesystem value here will be used in the initial formatting of
the volume.

If you're using the dynamic inventory, you must uncomment these two values as
well:

    #openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}"
    #openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi"

But note that they use the `os_cinder` lookup plugin we provide, so you must
tell Ansible where to find it either in `ansible.cfg` (the one we provide is
configured properly) or by exporting the
`ANSIBLE_LOOKUP_PLUGINS=openshift-ansible-contrib/lookup_plugins` environment
variable.



## Use an existing Cinder volume for the OpenShift registry

You can also use a pre-existing Cinder volume for the storage of your
OpenShift registry.

To do that, you need to have a Cinder volume. You can create one by
running:

    openstack volume create --size 
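
For illustration only, with a placeholder volume name and the 10 GB size used
in the registry example above, such a command might look like:

    # placeholder name and size -- adjust to your environment
    openstack volume create --size 10 registry-volume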

The volume needs to have a file system created before you put it to
use.

As with the automatically-created volume, you have to set up the
OpenStack credentials in `inventory/group_vars/OSEv3.yml` as well as
the registry values:

    #openshift_hosted_registry_storage_kind: openstack
    #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
    #openshift_hosted_registry_storage_openstack_filesystem: xfs
    #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05
    #openshift_hosted_registry_storage_volume_size: 10Gi

Note the `openshift_hosted_registry_storage_openstack_volumeID` and
`openshift_hosted_registry_storage_volume_size` values: these need to
be added in addition to the previous variables.

The **Cinder volume ID**, **filesystem** and **volume size** variables
must correspond to the values in your volume. The volume ID must be
the **UUID** of the Cinder volume, *not its name*.

We can format the volume for you if you ask for it in
`inventory/group_vars/all.yml`:

    prepare_and_format_registry_volume: true

**NOTE:** doing so **will destroy any data that's currently on the volume**!

You can also run the registry setup playbook directly:

    ansible-playbook -i inventory playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml

(the provisioning phase must be completed first)



## Configure static inventory and access via a bastion node

Example inventory variables:

    openstack_use_bastion: true
    bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24"
    openstack_private_ssh_key: ~/.ssh/id_rsa
    openstack_inventory: static
    openstack_inventory_path: ../../../../inventory
    openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com

The `openstack_subnet_prefix` is the OpenStack private network prefix for your cluster.
The `bastion_ingress_cidr` defines the accepted range for SSH connections to nodes
in addition to the `ssh_ingress_cidr` (see the security notes above).

The SSH config will be stored on the ansible control node at the given path.
Ansible uses it automatically. To access the cluster nodes with
that SSH config, use the `-F` option, e.g.:

    ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK

Note that relative paths will not work for the `openstack_ssh_config_path`, but they
work for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this
guide, the latter points to the current directory, where you run ansible commands
from.

To verify node connectivity, use the command:

    ansible -v -i inventory/hosts -m ping all

If something is broken, double-check the inventory variables, paths and the
generated `/hosts` and `openstack_ssh_config_path` files.

The `openstack_inventory: dynamic` setting can be used instead to access cluster nodes
directly via floating IPs. In this mode you cannot use a bastion node and should specify
the dynamic inventory file in your ansible commands, like `-i openstack.py`.

## Using Docker on the Ansible host

If you don't want to worry about the dependencies, you can use the
[OpenStack Control Host image][control-host-image].

[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/

It has all the dependencies installed, but you'll need to map your
code and credentials to it. Assuming your SSH keys live in `~/.ssh`
and everything else is in your current directory (i.e.
`ansible.cfg`, `keystonerc`, `inventory`, `openshift-ansible`,
`openshift-ansible-contrib`), this is how you run the deployment:

    sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \
        -v $PWD:/root/openshift:Z \
        -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \
        redhatcop/control-host-openstack bash

(feel free to replace `$PWD` with an actual path to your inventory and
checkouts, but note that relative paths don't work)

The first run may take a few minutes while the image is being
downloaded. After that, you'll be inside the container and you can run
the playbooks:

    cd openshift
    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml


### Run the playbook

Assuming your OpenStack (Keystone) credentials are in the `keystonerc` file,
this is how you start the provisioning process from your ansible control node:

    . keystonerc
    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml

Note, here you start with an empty inventory. The static inventory will be populated
with data, so you can omit providing additional arguments for future ansible commands.

If the bastion is enabled, the generated SSH config must be applied for ansible.
Otherwise, it is automatically included by the previous step. In order to execute it
as a separate playbook, use the following command:

    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml

The first infra node then becomes a bastion node as well and proxies access
for future ansible commands. The post-provision step also configures Satellite,
if requested, and the DNS server, and ensures the other OpenShift requirements are met.


## Running Custom Post-Provision Actions

A custom playbook can be run like this:

```
ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml
```

If you'd like to limit the run to one particular host, you can do so as follows:

```
ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -l app-node-0.openshift.example.com
```

You can also create your own custom playbook. Here are a few examples:

### Adding additional YUM repositories

```
---
- hosts: app
  tasks:

  # enable EPEL
  - name: Add repository
    yum_repository:
      name: epel
      description: EPEL YUM repo
      baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
```

This example runs against app nodes. The list of host group options includes:

- cluster_hosts (all hosts: app, infra, masters, dns, lb)
- OSEv3 (app, infra, masters)
- app
- dns
- masters
- infra_hosts

### Attaching additional RHN pools

```
---
- hosts: cluster_hosts
  tasks:
  - name: Attach additional RHN pool
    become: true
    command: "/usr/bin/subscription-manager attach --pool="
    register: attach_rhn_pool_result
    until: attach_rhn_pool_result.rc == 0
    retries: 10
    delay: 1
```

This playbook runs against all cluster nodes. In order to help prevent slow connectivity
problems, the task is retried 10 times in case of initial failure.
Note that in order for this example to work in your deployment, your servers must use the RHEL image.
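
The packaged version of this playbook, `add-rhn-pools.yml` (see the library of
custom post-provision actions listed below), takes the pool IDs as a list
variable instead, so a run could look roughly like this (the pool ID below is
a placeholder):

```
# "your-pool-id" is a placeholder pool ID
ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml --extra-vars '{"rhn_pools": ["your-pool-id"]}'
```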

### Adding extra Docker registry URLs

This playbook is located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/tree/master/playbooks/provisioning/openstack/custom-actions) directory.

It adds URLs passed as arguments to the docker configuration file.
Going into more detail, the configuration file (which is in the YAML format) is loaded into an ansible variable
([lines 27-30](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L27-L30))
and in its structure, the `registries` and `insecure_registries` sections are expanded with the newly added items
([lines 56-76](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L56-L76)).
The new content is then saved into the original file
([lines 78-82](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L78-L82))
and docker is restarted.

Example usage:
```
ansible-playbook -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml --extra-vars '{"registries": "reg1", "insecure_registries": ["ins_reg1","ins_reg2"]}'
```

### Adding extra CAs to the trust chain

This playbook is also located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions) directory.
It copies the passed CAs to the trust chain location and updates the trust chain on each selected host.

Example usage:
```
ansible-playbook -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-cas.yml --extra-vars '{"ca_files": [, ]}'
```

Please consider contributing your custom playbook back to openshift-ansible-contrib!

A library of custom post-provision actions exists in `openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions`. Playbooks include:

* [add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml): adds a list of custom yum repositories to every node in the cluster
* [add-rhn-pools.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml): attaches a list of additional RHN pools to every node in the cluster
* [add-docker-registry.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml): adds a list of docker registries to the docker configuration on every node in the cluster
* [add-cas.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-cas.yml): adds a list of CAs to the trust chain on every node in the cluster


## Install OpenShift

Once the provisioning succeeds, you can install OpenShift by running:

    ansible-playbook openshift-ansible/playbooks/byo/config.yml

## Access UI

The OpenShift UI may be accessed via the first master node's FQDN on port 8443.

When using a bastion, you may want to make an SSH tunnel from your control node
to access the UI at `https://localhost:8443`, with this inventory variable:

    openshift_ui_ssh_tunnel: True

Note, this requires sudo rights on the ansible control node and an absolute path
for the `openstack_private_ssh_key`.
You should also update the control node's +`/etc/hosts`: + + 127.0.0.1 master-0.openshift.example.com + +In order to access UI, the ssh-tunnel service will be created and started on the +control node. Make sure to remove these changes and the service manually, when not +needed anymore. + +## Scale Deployment up/down + +### Scaling up + +One can scale up the number of application nodes by executing the ansible playbook +`openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml`. +This process can be done even if there is currently no deployment available. +The `increment_by` variable is used to specify by how much the deployment should +be scaled up (if none exists, it serves as a target number of application nodes). +The path to `openshift-ansible` directory can be customised by the `openshift_ansible_dir` +variable. Its value must be an absolute path to `openshift-ansible` and it cannot +contain the '/' symbol at the end. + +Usage: + +``` +ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=] [-e openshift_ansible_dir=] +``` + +Note: This playbook works only without a bastion node (`openstack_use_bastion: False`). diff --git a/playbooks/openstack/ansible.cfg b/playbooks/openstack/ansible.cfg new file mode 100644 index 000000000..a21f023ea --- /dev/null +++ b/playbooks/openstack/ansible.cfg @@ -0,0 +1,24 @@ +# config file for ansible -- http://ansible.com/ +# ============================================== +[defaults] +ansible_user = openshift +forks = 50 +# work around privilege escalation timeouts in ansible +timeout = 30 +host_key_checking = false +inventory = inventory +inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt +gathering = smart +retry_files_enabled = false +fact_caching = jsonfile +fact_caching_connection = .ansible/cached_facts +fact_caching_timeout = 900 +stdout_callback = skippy +callback_whitelist = profile_tasks +lookup_plugins = openshift-ansible-contrib/lookup_plugins + + +[ssh_connection] +ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no +control_path = /var/tmp/%%h-%%r +pipelining = True diff --git a/playbooks/openstack/custom-actions/add-cas.yml b/playbooks/openstack/custom-actions/add-cas.yml new file mode 100644 index 000000000..b2c195f91 --- /dev/null +++ b/playbooks/openstack/custom-actions/add-cas.yml @@ -0,0 +1,13 @@ +--- +- hosts: cluster_hosts + become: true + vars: + ca_files: [] + tasks: + - name: Copy CAs to the trusted CAs location + with_items: "{{ ca_files }}" + copy: + src: "{{ item }}" + dest: /etc/pki/ca-trust/source/anchors/ + - name: Update trusted CAs + shell: 'update-ca-trust enable && update-ca-trust extract' diff --git a/playbooks/openstack/custom-actions/add-docker-registry.yml b/playbooks/openstack/custom-actions/add-docker-registry.yml new file mode 100644 index 000000000..e118a71dc --- /dev/null +++ b/playbooks/openstack/custom-actions/add-docker-registry.yml @@ -0,0 +1,90 @@ +--- +- hosts: OSEv3 + become: true + vars: + registries: [] + insecure_registries: [] + + tasks: + - name: Check if docker is even installed + command: docker + + - name: Install atomic-registries package + yum: + name: atomic-registries + state: latest + + - name: Get registry configuration file + register: file_result + stat: + path: /etc/containers/registries.conf + + - name: Check if it exists + assert: + that: 'file_result.stat.exists' + msg: "Configuration file does not exist." 
+ + - name: Load configuration file + shell: cat /etc/containers/registries.conf + register: file_content + + - name: Store file content into a variable + set_fact: + docker_conf: "{{ file_content.stdout | from_yaml }}" + + - name: Make sure that docker file content is a dictionary + when: '(docker_conf is string) and (not docker_conf)' + set_fact: + docker_conf: {} + + - name: Make sure that registries is a list + when: 'registries is string' + set_fact: + registries_list: [ "{{ registries }}" ] + + - name: Make sure that insecure_registries is a list + when: 'insecure_registries is string' + set_fact: + insecure_registries_list: [ "{{ insecure_registries }}" ] + + - name: Set default values if there are no registries defined + set_fact: + docker_conf_registries: "{{ [] if docker_conf['registries'] is not defined else docker_conf['registries'] }}" + docker_conf_insecure_registries: "{{ [] if docker_conf['insecure_registries'] is not defined else docker_conf['insecure_registries'] }}" + + - name: Add other registries + when: 'registries_list is not defined' + register: registries_merge_result + set_fact: + docker_conf: "{{ docker_conf | combine({'registries': (docker_conf_registries + registries) | unique}, recursive=True) }}" + + - name: Add other registries (if registries had to be converted) + when: 'registries_merge_result|skipped' + set_fact: + docker_conf: "{{ docker_conf | combine({'registries': (docker_conf_registries + registries_list) | unique}, recursive=True) }}" + + - name: Add insecure registries + when: 'insecure_registries_list is not defined' + register: insecure_registries_merge_result + set_fact: + docker_conf: "{{ docker_conf | combine({'insecure_registries': (docker_conf_insecure_registries + insecure_registries) | unique }, recursive=True) }}" + + - name: Add insecure registries (if insecure_registries had to be converted) + when: 'insecure_registries_merge_result|skipped' + set_fact: + docker_conf: "{{ docker_conf | combine({'insecure_registries': (docker_conf_insecure_registries + insecure_registries_list) | unique }, recursive=True) }}" + + - name: Load variable back to file + copy: + content: "{{ docker_conf | to_yaml }}" + dest: /etc/containers/registries.conf + + - name: Restart registries service + service: + name: registries + state: restarted + + - name: Restart docker + service: + name: docker + state: restarted diff --git a/playbooks/openstack/custom-actions/add-rhn-pools.yml b/playbooks/openstack/custom-actions/add-rhn-pools.yml new file mode 100644 index 000000000..d17c1e335 --- /dev/null +++ b/playbooks/openstack/custom-actions/add-rhn-pools.yml @@ -0,0 +1,13 @@ +--- +- hosts: cluster_hosts + vars: + rhn_pools: [] + tasks: + - name: Attach additional RHN pools + become: true + with_items: "{{ rhn_pools }}" + command: "/usr/bin/subscription-manager attach --pool={{ item }}" + register: attach_rhn_pools_result + until: attach_rhn_pools_result.rc == 0 + retries: 10 + delay: 1 diff --git a/playbooks/openstack/custom-actions/add-yum-repos.yml b/playbooks/openstack/custom-actions/add-yum-repos.yml new file mode 100644 index 000000000..ffebcb642 --- /dev/null +++ b/playbooks/openstack/custom-actions/add-yum-repos.yml @@ -0,0 +1,12 @@ +--- +- hosts: cluster_hosts + vars: + yum_repos: [] + tasks: + # enable additional yum repos + - name: Add repository + yum_repository: + name: "{{ item.name }}" + description: "{{ item.description }}" + baseurl: "{{ item.baseurl }}" + with_items: "{{ yum_repos }}" diff --git a/playbooks/openstack/galaxy-requirements.yaml 
b/playbooks/openstack/galaxy-requirements.yaml new file mode 100644 index 000000000..1d745dcc3 --- /dev/null +++ b/playbooks/openstack/galaxy-requirements.yaml @@ -0,0 +1,10 @@ +--- +# This is the Ansible Galaxy requirements file to pull in the correct roles + +# From 'infra-ansible' +- src: https://github.com/redhat-cop/infra-ansible + version: master + +# From 'openshift-ansible' +- src: https://github.com/openshift/openshift-ansible + version: master diff --git a/playbooks/openstack/openshift-cluster/custom_flavor_check.yaml b/playbooks/openstack/openshift-cluster/custom_flavor_check.yaml new file mode 100644 index 000000000..e11874c28 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/custom_flavor_check.yaml @@ -0,0 +1,9 @@ +--- +- name: Try to get flavor facts + os_flavor_facts: + name: "{{ flavor }}" + register: flavor_result +- name: Check that custom flavor is available + assert: + that: "flavor_result.ansible_facts.openstack_flavors" + msg: "Flavor {{ flavor }} is not available." diff --git a/playbooks/openstack/openshift-cluster/custom_image_check.yaml b/playbooks/openstack/openshift-cluster/custom_image_check.yaml new file mode 100644 index 000000000..452e1e4d8 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/custom_image_check.yaml @@ -0,0 +1,9 @@ +--- +- name: Try to get image facts + os_image_facts: + image: "{{ image }}" + register: image_result +- name: Check that custom image is available + assert: + that: "image_result.ansible_facts.openstack_image" + msg: "Image {{ image }} is not available." diff --git a/playbooks/openstack/openshift-cluster/net_vars_check.yaml b/playbooks/openstack/openshift-cluster/net_vars_check.yaml new file mode 100644 index 000000000..68afde415 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/net_vars_check.yaml @@ -0,0 +1,14 @@ +--- +- name: Check the provider network configuration + fail: + msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network" + when: + - openstack_provider_network_name is defined + - openstack_private_data_network_name is defined + +- name: Check the flannel network configuration + fail: + msg: "A dedicated containers data network is only supported with Flannel SDN" + when: + - openstack_private_data_network_name is defined + - not openshift_use_flannel|default(False)|bool diff --git a/playbooks/openstack/openshift-cluster/post-install.yml b/playbooks/openstack/openshift-cluster/post-install.yml new file mode 100644 index 000000000..417813e2a --- /dev/null +++ b/playbooks/openstack/openshift-cluster/post-install.yml @@ -0,0 +1,57 @@ +--- +- hosts: OSEv3 + gather_facts: False + become: True + tasks: + - name: Save iptables rules to a backup file + when: openshift_use_flannel|default(False)|bool + shell: iptables-save > /etc/sysconfig/iptables.orig-$(date +%Y%m%d%H%M%S) + +# Enable iptables service on app nodes to persist custom rules (flannel SDN) +# FIXME(bogdando) w/a https://bugzilla.redhat.com/show_bug.cgi?id=1490820 +- hosts: app + gather_facts: False + become: True + vars: + os_firewall_allow: + - service: dnsmasq tcp + port: 53/tcp + - service: dnsmasq udp + port: 53/udp + tasks: + - when: openshift_use_flannel|default(False)|bool + block: + - include_role: + name: openshift-ansible/roles/os_firewall + - include_role: + name: openshift-ansible/roles/lib_os_firewall + - name: set allow rules for dnsmasq + os_firewall_manage_iptables: + name: "{{ item.service }}" + action: add + protocol: "{{ item.port.split('/')[1] }}" + port: "{{ 
item.port.split('/')[0] }}" + with_items: "{{ os_firewall_allow }}" + +- hosts: OSEv3 + gather_facts: False + become: True + tasks: + - name: Apply post-install iptables hacks for Flannel SDN (the best effort) + when: openshift_use_flannel|default(False)|bool + block: + - name: set allow/masquerade rules for for flannel/docker + shell: >- + (iptables-save | grep -q custom-flannel-docker-1) || + iptables -A DOCKER -w + -p all -j ACCEPT + -m comment --comment "custom-flannel-docker-1"; + (iptables-save | grep -q custom-flannel-docker-2) || + iptables -t nat -A POSTROUTING -w + -o {{flannel_interface|default('eth1')}} + -m comment --comment "custom-flannel-docker-2" + -j MASQUERADE + + # NOTE(bogdando) the rules will not be restored, when iptables service unit is disabled & masked + - name: Persist in-memory iptables rules (w/o dynamic KUBE rules) + shell: iptables-save | grep -v KUBE > /etc/sysconfig/iptables diff --git a/playbooks/openstack/openshift-cluster/post-provision-openstack.yml b/playbooks/openstack/openshift-cluster/post-provision-openstack.yml new file mode 100644 index 000000000..e460fbf12 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/post-provision-openstack.yml @@ -0,0 +1,118 @@ +--- +- hosts: cluster_hosts + name: Wait for the the nodes to come up + become: False + gather_facts: False + tasks: + - when: not openstack_use_bastion|default(False)|bool + wait_for_connection: + - when: openstack_use_bastion|default(False)|bool + delegate_to: bastion + wait_for_connection: + +- hosts: cluster_hosts + gather_facts: True + tasks: + - name: Debug hostvar + debug: + msg: "{{ hostvars[inventory_hostname] }}" + verbosity: 2 + +- name: OpenShift Pre-Requisites (part 1) + include: pre-install.yml + +- name: Assign hostnames + hosts: cluster_hosts + gather_facts: False + become: true + roles: + - role: hostnames + +- name: Subscribe DNS Host to allow for configuration below + hosts: dns + gather_facts: False + become: true + roles: + - role: subscription-manager + when: hostvars.localhost.rhsm_register|default(False) + tags: 'subscription-manager' + +- name: Determine which DNS server(s) to use for our generated records + hosts: localhost + gather_facts: False + become: False + roles: + - dns-server-detect + +- name: Build the DNS Server Views and Configure DNS Server(s) + hosts: dns + gather_facts: False + become: true + roles: + - role: dns-views + - role: infra-ansible/roles/dns-server + +- name: Build and process DNS Records + hosts: localhost + gather_facts: True + become: False + roles: + - role: dns-records + use_bastion: "{{ openstack_use_bastion|default(False)|bool }}" + - role: infra-ansible/roles/dns + +- name: Switch the stack subnet to the configured private DNS server + hosts: localhost + gather_facts: False + become: False + vars_files: + - stack_params.yaml + tasks: + - include_role: + name: openstack-stack + tasks_from: subnet_update_dns_servers + +- name: OpenShift Pre-Requisites (part 2) + hosts: OSEv3 + gather_facts: true + become: true + vars: + interface: "{{ flannel_interface|default('eth1') }}" + interface_file: /etc/sysconfig/network-scripts/ifcfg-{{ interface }} + interface_config: + DEVICE: "{{ interface }}" + TYPE: Ethernet + BOOTPROTO: dhcp + ONBOOT: 'yes' + DEFTROUTE: 'no' + PEERDNS: 'no' + pre_tasks: + - name: "Include DNS configuration to ensure proper name resolution" + lineinfile: + state: present + dest: /etc/sysconfig/network + regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" + line: "IP4_NAMESERVERS={{ 
hostvars['localhost'].private_dns_server }}" + - name: "Configure the flannel interface options" + when: openshift_use_flannel|default(False)|bool + block: + - file: + dest: "{{ interface_file }}" + state: touch + mode: 0644 + owner: root + group: root + - lineinfile: + state: present + dest: "{{ interface_file }}" + regexp: "{{ item.key }}=" + line: "{{ item.key }}={{ item.value }}" + with_dict: "{{ interface_config }}" + roles: + - node-network-manager + +- include: prepare-and-format-cinder-volume.yaml + when: > + prepare_and_format_registry_volume|default(False) or + (cinder_registry_volume is defined and + cinder_registry_volume.changed|default(False)) diff --git a/playbooks/openstack/openshift-cluster/pre-install.yml b/playbooks/openstack/openshift-cluster/pre-install.yml new file mode 100644 index 000000000..45e9005cc --- /dev/null +++ b/playbooks/openstack/openshift-cluster/pre-install.yml @@ -0,0 +1,19 @@ +--- +############################### +# OpenShift Pre-Requisites + +# - subscribe hosts +# - prepare docker +# - other prep (install additional packages, etc.) +# +- hosts: OSEv3 + become: true + roles: + - { role: subscription-manager, when: hostvars.localhost.rhsm_register|default(False), tags: 'subscription-manager', ansible_sudo: true } + - { role: docker, tags: 'docker' } + - { role: openshift-prep, tags: 'openshift-prep' } + +- hosts: localhost:cluster_hosts + become: False + tasks: + - include: pre_tasks.yml diff --git a/playbooks/openstack/openshift-cluster/pre_tasks.yml b/playbooks/openstack/openshift-cluster/pre_tasks.yml new file mode 100644 index 000000000..11fe2dd84 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/pre_tasks.yml @@ -0,0 +1,53 @@ +--- +- name: Generate Environment ID + set_fact: + env_random_id: "{{ ansible_date_time.epoch }}" + run_once: true + delegate_to: localhost + +- name: Set default Environment ID + set_fact: + default_env_id: "openshift-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" + delegate_to: localhost + +- name: Setting Common Facts + set_fact: + env_id: "{{ env_id | default(default_env_id) }}" + delegate_to: localhost + +- name: Updating DNS domain to include env_id (if not empty) + set_fact: + full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' 
+ public_dns_domain) }}" + delegate_to: localhost + +- name: Set the APP domain for OpenShift use + set_fact: + openshift_app_domain: "{{ openshift_app_domain | default('apps') }}" + delegate_to: localhost + +- name: Set the default app domain for routing purposes + set_fact: + openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" + delegate_to: localhost + when: + - openshift_master_default_subdomain is undefined + +# Check that openshift_cluster_node_labels has regions defined for all groups +# NOTE(kpilatov): if node labels are to be enabled for more groups, +# this check needs to be modified as well +- name: Set openshift_cluster_node_labels if undefined (should not happen) + set_fact: + openshift_cluster_node_labels: {'app': {'region': 'primary'}, 'infra': {'region': 'infra'}} + when: openshift_cluster_node_labels is not defined + +- name: Set openshift_cluster_node_labels for the infra group + set_fact: + openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'infra': {'region': 'infra'}}, recursive=True) }}" + +- name: Set openshift_cluster_node_labels for the app group + set_fact: + openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'region': 'primary'}}, recursive=True) }}" + +- name: Set openshift_cluster_node_labels for auto-scaling app nodes + set_fact: + openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'autoscaling': 'app'}}, recursive=True) }}" diff --git a/playbooks/openstack/openshift-cluster/prepare-and-format-cinder-volume.yaml b/playbooks/openstack/openshift-cluster/prepare-and-format-cinder-volume.yaml new file mode 100644 index 000000000..30e094459 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/prepare-and-format-cinder-volume.yaml @@ -0,0 +1,67 @@ +--- +- hosts: localhost + gather_facts: False + become: False + tasks: + - set_fact: + cinder_volume: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_volumeID }}" + cinder_fs: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_filesystem }}" + + - name: Attach the volume to the VM + os_server_volume: + state: present + server: "{{ groups['masters'][0] }}" + volume: "{{ cinder_volume }}" + register: volume_attachment + + - set_fact: + attached_device: >- + {{ volume_attachment['attachments']|json_query("[?volume_id=='" + cinder_volume + "'].device | [0]") }} + + - delegate_to: "{{ groups['masters'][0] }}" + block: + - name: Wait for the device to appear + wait_for: path={{ attached_device }} + + - name: Create a temp directory for mounting the volume + tempfile: + prefix: cinder-volume + state: directory + register: cinder_mount_dir + + - name: Format the device + filesystem: + fstype: "{{ cinder_fs }}" + dev: "{{ attached_device }}" + + - name: Mount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ attached_device }}" + state: mounted + fstype: "{{ cinder_fs }}" + + - name: Change mode on the filesystem + file: + path: "{{ cinder_mount_dir.path }}" + state: directory + recurse: true + mode: 0777 + + - name: Unmount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ attached_device }}" + state: absent + fstype: "{{ cinder_fs }}" + + - name: Delete the temp directory + file: + name: "{{ cinder_mount_dir.path }}" + state: absent + + - name: Detach the volume from the VM + os_server_volume: + state: absent + server: "{{ groups['masters'][0] }}" + volume: "{{ cinder_volume }}" diff --git 
a/playbooks/openstack/openshift-cluster/prerequisites.yml b/playbooks/openstack/openshift-cluster/prerequisites.yml new file mode 100644 index 000000000..11a31411e --- /dev/null +++ b/playbooks/openstack/openshift-cluster/prerequisites.yml @@ -0,0 +1,123 @@ +--- +- hosts: localhost + tasks: + + # Sanity check of inventory variables + - include: net_vars_check.yaml + + # Check ansible + - name: Check Ansible version + assert: + that: > + (ansible_version.major == 2 and ansible_version.minor >= 3) or + (ansible_version.major > 2) + msg: "Ansible version must be at least 2.3" + + # Check shade + - name: Try to import python module shade + command: python -c "import shade" + ignore_errors: yes + register: shade_result + - name: Check if shade is installed + assert: + that: 'shade_result.rc == 0' + msg: "Python module shade is not installed" + + # Check jmespath + - name: Try to import python module shade + command: python -c "import jmespath" + ignore_errors: yes + register: jmespath_result + - name: Check if jmespath is installed + assert: + that: 'jmespath_result.rc == 0' + msg: "Python module jmespath is not installed" + + # Check python-dns + - name: Try to import python DNS module + command: python -c "import dns" + ignore_errors: yes + register: pythondns_result + - name: Check if python-dns is installed + assert: + that: 'pythondns_result.rc == 0' + msg: "Python module python-dns is not installed" + + # Check jinja2 + - name: Try to import jinja2 module + command: python -c "import jinja2" + ignore_errors: yes + register: jinja_result + - name: Check if jinja2 is installed + assert: + that: 'jinja_result.rc == 0' + msg: "Python module jinja2 is not installed" + + # Check Glance image + - name: Try to get image facts + os_image_facts: + image: "{{ openstack_default_image_name }}" + register: image_result + - name: Check that image is available + assert: + that: "image_result.ansible_facts.openstack_image" + msg: "Image {{ openstack_default_image_name }} is not available" + + # Check network name + - name: Try to get network facts + os_networks_facts: + name: "{{ openstack_external_network_name }}" + register: network_result + when: not openstack_provider_network_name|default(None) + - name: Check that network is available + assert: + that: "network_result.ansible_facts.openstack_networks" + msg: "Network {{ openstack_external_network_name }} is not available" + when: not openstack_provider_network_name|default(None) + + # Check keypair + # TODO kpilatov: there is no Ansible module for getting OS keypairs + # (os_keypair is not suitable for this) + # this method does not force python-openstackclient dependency + - name: Try to show keypair + command: > + python -c 'import shade; cloud = shade.openstack_cloud(); + exit(cloud.get_keypair("{{ openstack_ssh_public_key }}") is None)' + ignore_errors: yes + register: key_result + - name: Check that keypair is available + assert: + that: 'key_result.rc == 0' + msg: "Keypair {{ openstack_ssh_public_key }} is not available" + +# Check that custom images and flavors exist +- hosts: localhost + + # Include variables that will be used by heat + vars_files: + - stack_params.yaml + + tasks: + # Check that custom images are available + - include: custom_image_check.yaml + with_items: + - "{{ openstack_master_image }}" + - "{{ openstack_infra_image }}" + - "{{ openstack_node_image }}" + - "{{ openstack_lb_image }}" + - "{{ openstack_etcd_image }}" + - "{{ openstack_dns_image }}" + loop_control: + loop_var: image + + # Check that custom flavors are 
available + - include: custom_flavor_check.yaml + with_items: + - "{{ master_flavor }}" + - "{{ infra_flavor }}" + - "{{ node_flavor }}" + - "{{ lb_flavor }}" + - "{{ etcd_flavor }}" + - "{{ dns_flavor }}" + loop_control: + loop_var: flavor diff --git a/playbooks/openstack/openshift-cluster/provision-openstack.yml b/playbooks/openstack/openshift-cluster/provision-openstack.yml new file mode 100644 index 000000000..bf424676d --- /dev/null +++ b/playbooks/openstack/openshift-cluster/provision-openstack.yml @@ -0,0 +1,35 @@ +--- +- hosts: localhost + gather_facts: True + become: False + vars_files: + - stack_params.yaml + pre_tasks: + - include: pre_tasks.yml + roles: + - role: openstack-stack + - role: openstack-create-cinder-registry + when: + - cinder_hosted_registry_name is defined + - cinder_hosted_registry_size_gb is defined + - role: static_inventory + when: openstack_inventory|default('static') == 'static' + inventory_path: "{{ openstack_inventory_path|default(inventory_dir) }}" + private_ssh_key: "{{ openstack_private_ssh_key|default('') }}" + ssh_config_path: "{{ openstack_ssh_config_path|default('/tmp/ssh.config.openshift.ansible' + '.' + stack_name) }}" + ssh_user: "{{ ansible_user }}" + +- name: Refresh Server inventory or exit to apply SSH config + hosts: localhost + connection: local + become: False + gather_facts: False + tasks: + - name: Exit to apply SSH config for a bastion + meta: end_play + when: openstack_use_bastion|default(False)|bool + - name: Refresh Server inventory + meta: refresh_inventory + +- include: post-provision-openstack.yml + when: not openstack_use_bastion|default(False)|bool diff --git a/playbooks/openstack/openshift-cluster/provision.yaml b/playbooks/openstack/openshift-cluster/provision.yaml new file mode 100644 index 000000000..474c9c803 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/provision.yaml @@ -0,0 +1,4 @@ +--- +- include: "prerequisites.yml" + +- include: "provision-openstack.yml" diff --git a/playbooks/openstack/openshift-cluster/roles b/playbooks/openstack/openshift-cluster/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/openstack/openshift-cluster/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/playbooks/openstack/openshift-cluster/scale-up.yaml b/playbooks/openstack/openshift-cluster/scale-up.yaml new file mode 100644 index 000000000..79fc09050 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/scale-up.yaml @@ -0,0 +1,75 @@ +--- +# Get the needed information about the current deployment +- hosts: masters[0] + tasks: + - name: Get number of app nodes + shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l + register: oc_old_num_nodes + - name: Get names of app nodes + shell: oc get nodes -l autoscaling=app --no-headers=true | cut -f1 -d " " + register: oc_old_app_nodes + +- hosts: localhost + tasks: + # Since both number and names of app nodes are to be removed + # localhost variables for these values need to be set + - name: Store old number and names of app nodes locally (if there is an existing deployment) + when: '"masters" in groups' + register: set_fact_result + set_fact: + oc_old_num_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_num_nodes'].stdout }}" + oc_old_app_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_app_nodes'].stdout_lines }}" + + - name: Set default values for old app nodes (if there is no existing deployment) + when: 'set_fact_result | skipped' + set_fact: + oc_old_num_nodes: 0 + oc_old_app_nodes: [] + + # Set how 
many nodes are to be added (1 by default) + - name: Set how many nodes are to be added + set_fact: + increment_by: 1 + - name: Check that the number corresponds to scaling up (not down) + assert: + that: 'increment_by | int >= 1' + msg: > + FAIL: The value of increment_by must be at least 1 + (but it is {{ increment_by | int }}). + - name: Update openstack_num_nodes variable + set_fact: + openstack_num_nodes: "{{ oc_old_num_nodes | int + increment_by | int }}" + +# Run provision.yaml with higher number of nodes to create a new app-node VM +- include: provision.yaml + +# Run config.yml to perform openshift installation +# Path to openshift-ansible can be customised: +# - the value of openshift_ansible_dir has to be an absolute path +# - the path cannot contain the '/' symbol at the end + +# Creating a new deployment by the full installation +- include: "{{ openshift_ansible_dir }}/playbooks/byo/config.yml" + vars: + openshift_ansible_dir: ../../../../openshift-ansible + when: 'not groups["new_nodes"] | list' + +# Scaling up existing deployment +- include: "{{ openshift_ansible_dir }}/playbooks/byo/openshift-node/scaleup.yml" + vars: + openshift_ansible_dir: ../../../../openshift-ansible + when: 'groups["new_nodes"] | list' + +# Post-verification: Verify new number of nodes +- hosts: masters[0] + tasks: + - name: Get number of nodes + shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l + register: oc_new_num_nodes + - name: Check that the actual result matches the defined value + assert: + that: 'oc_new_num_nodes.stdout | int == (hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int)' + msg: > + FAIL: Number of application nodes has not been increased accordingly + (it should be {{ hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int }} + but it is {{ oc_new_num_nodes.stdout | int }}). 
diff --git a/playbooks/openstack/openshift-cluster/stack_params.yaml b/playbooks/openstack/openshift-cluster/stack_params.yaml new file mode 100644 index 000000000..a4da31bfe --- /dev/null +++ b/playbooks/openstack/openshift-cluster/stack_params.yaml @@ -0,0 +1,49 @@ +--- +stack_name: "{{ env_id }}.{{ public_dns_domain }}" +dns_domain: "{{ public_dns_domain }}" +dns_nameservers: "{{ public_dns_nameservers }}" +subnet_prefix: "{{ openstack_subnet_prefix }}" +master_hostname: "{{ openstack_master_hostname | default('master') }}" +infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" +node_hostname: "{{ openstack_node_hostname | default('app-node') }}" +lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" +etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" +dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" +ssh_public_key: "{{ openstack_ssh_public_key }}" +openstack_image: "{{ openstack_default_image_name }}" +lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" +etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" +master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" +node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" +infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" +dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" +openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" +openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" +openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" +openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" +openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" +openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" +openstack_private_network: >- + {% if openstack_provider_network_name | default(None) -%} + {{ openstack_provider_network_name }} + {%- else -%} + {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} + {%- endif -%} +provider_network: "{{ openstack_provider_network_name | default(None) }}" +external_network: "{{ openstack_external_network_name | default(None) }}" +num_etcd: "{{ openstack_num_etcd | default(0) }}" +num_masters: "{{ openstack_num_masters }}" +num_nodes: "{{ openstack_num_nodes }}" +num_infra: "{{ openstack_num_infra }}" +num_dns: "{{ openstack_num_dns | default(1) }}" +master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}" +infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" +master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" +infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" +node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" +etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}" +dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" +lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" +nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" +use_bastion: "{{ openstack_use_bastion|default(False) }}" +ui_ssh_tunnel: "{{ openshift_ui_ssh_tunnel|default(False) }}" diff --git 
a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml new file mode 100644 index 000000000..949a323a7 --- /dev/null +++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml @@ -0,0 +1,59 @@ +--- +openshift_deployment_type: origin +#openshift_deployment_type: openshift-enterprise +#openshift_release: v3.5 +openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" + +openshift_master_cluster_method: native +openshift_master_cluster_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" +openshift_master_cluster_public_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" + +osm_default_node_selector: 'region=primary' + +openshift_hosted_router_wait: True +openshift_hosted_registry_wait: True + +## Openstack credentials +#openshift_cloudprovider_kind=openstack +#openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" +#openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}" +#openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" +#openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" +#openshift_cloudprovider_openstack_region="{{ lookup('env', 'OS_REGION_NAME') }}" + + +## Use Cinder volume for Openshift registry: +#openshift_hosted_registry_storage_kind: openstack +#openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] +#openshift_hosted_registry_storage_openstack_filesystem: xfs + +## NOTE(shadower): This won't work until the openshift-ansible issue #5657 is fixed: +## https://github.com/openshift/openshift-ansible/issues/5657 +## If you're using the `cinder_hosted_registry_name` option from +## `all.yml`, uncomment these lines: +#openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}" +#openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi" + +## If you're using a Cinder volume you've set up yourself, uncomment these lines: +#openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05 +#openshift_hosted_registry_storage_volume_size: 10Gi + + +# NOTE(shadower): the hostname check seems to always fail because the +# host's floating IP address doesn't match the address received from +# inside the host. +openshift_override_hostname_check: true + +# For POCs or demo environments that are using smaller instances than +# the official recommended values for RAM and DISK, uncomment the line below. +#openshift_disable_check: disk_availability,memory_availability + +# NOTE(shadower): Always switch to root on the OSEv3 nodes. +# openshift-ansible requires an explicit `become`. 
+ansible_become: true + +# # Flannel networking +#osm_cluster_network_cidr: 10.128.0.0/14 +#openshift_use_openshift_sdn: false +#openshift_use_flannel: true +#flannel_interface: eth1 diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml new file mode 100644 index 000000000..83289307d --- /dev/null +++ b/playbooks/openstack/sample-inventory/group_vars/all.yml @@ -0,0 +1,166 @@ +--- +env_id: "openshift" +public_dns_domain: "example.com" +public_dns_nameservers: [] + +# # Used Hostnames +# # - set custom hostnames for roles by uncommenting corresponding lines +#openstack_master_hostname: "master" +#openstack_infra_hostname: "infra-node" +#openstack_node_hostname: "app-node" +#openstack_lb_hostname: "lb" +#openstack_etcd_hostname: "etcd" +#openstack_dns_hostname: "dns" + +openstack_ssh_public_key: "openshift" +openstack_external_network_name: "public" +#openstack_private_network_name: "openshift-ansible-{{ stack_name }}-net" +# # A dedicated Neutron network name for containers data network +# # Configures the data network to be separated from openstack_private_network_name +# # NOTE: this is only supported with Flannel SDN yet +#openstack_private_data_network_name: "openshift-ansible-{{ stack_name }}-data-net" + +## If you want to use a provider network, set its name here. +## NOTE: the `openstack_external_network_name` and +## `openstack_private_network_name` options will be ignored when using a +## provider network. +#openstack_provider_network_name: "provider" + +# # Used Images +# # - set specific images for roles by uncommenting corresponding lines +# # - note: do not remove openstack_default_image_name definition +#openstack_master_image_name: "centos7" +#openstack_infra_image_name: "centos7" +#openstack_node_image_name: "centos7" +#openstack_lb_image_name: "centos7" +#openstack_etcd_image_name: "centos7" +#openstack_dns_image_name: "centos7" +openstack_default_image_name: "centos7" + +openstack_num_masters: 1 +openstack_num_infra: 1 +openstack_num_nodes: 2 + +# # Used Flavors +# # - set specific flavors for roles by uncommenting corresponding lines +# # - note: do note remove openstack_default_flavor definition +#openstack_master_flavor: "m1.medium" +#openstack_infra_flavor: "m1.medium" +#openstack_node_flavor: "m1.medium" +#openstack_lb_flavor: "m1.medium" +#openstack_etcd_flavor: "m1.medium" +#openstack_dns_flavor: "m1.medium" +openstack_default_flavor: "m1.medium" + +# # Numerical index of nodes to remove +# openstack_nodes_to_remove: [] + +# # Docker volume size +# # - set specific volume size for roles by uncommenting corresponding lines +# # - note: do not remove docker_default_volume_size definition +#docker_master_volume_size: "15" +#docker_infra_volume_size: "15" +#docker_node_volume_size: "15" +#docker_etcd_volume_size: "2" +#docker_dns_volume_size: "1" +#docker_lb_volume_size: "5" +docker_volume_size: "15" + +## Specify server group policies for master and infra nodes. Nova must be configured to +## enable these policies. 'anti-affinity' will ensure that each VM is launched on a +## different physical host. +#openstack_master_server_group_policies: [anti-affinity] +#openstack_infra_server_group_policies: [anti-affinity] + +## Create a Cinder volume and use it for the OpenShift registry. +## NOTE: the openstack credentials and hosted registry options must be set in OSEv3.yml! 
+#cinder_hosted_registry_name: cinder-registry +#cinder_hosted_registry_size_gb: 10 + +## Set up a filesystem on the cinder volume specified in `OSEv3.yaml`. +## You need to specify the file system and volume ID in OSEv3 via +## `openshift_hosted_registry_storage_openstack_filesystem` and +## `openshift_hosted_registry_storage_openstack_volumeID`. +## WARNING: This will delete any data on the volume! +#prepare_and_format_registry_volume: False + +openstack_subnet_prefix: "192.168.99" + +## Red Hat subscription defaults to false which means we will not attempt to +## subscribe the nodes +#rhsm_register: False + +# # Using Red Hat Satellite: +#rhsm_register: True +#rhsm_satellite: 'sat-6.example.com' +#rhsm_org: 'OPENSHIFT_ORG' +#rhsm_activationkey: '' + +# # Or using RHN username, password and optionally pool: +#rhsm_register: True +#rhsm_username: '' +#rhsm_password: '' +#rhsm_pool: '' + +#rhsm_repos: +# - "rhel-7-server-rpms" +# - "rhel-7-server-ose-3.5-rpms" +# - "rhel-7-server-extras-rpms" +# - "rhel-7-fast-datapath-rpms" + + +# # Roll-your-own DNS +#openstack_num_dns: 0 +#external_nsupdate_keys: +# public: +# key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg==' +# key_algorithm: 'hmac-md5' +# server: '192.168.1.1' +# private: +# key_secret: 'kVE2bVTgZjrdJipxPhID8BEZmbHD8cExlVPR+zbFpW6la8kL5wpXiwOh8q5AAosXQI5t95UXwq3Inx8QT58duw==' +# key_algorithm: 'hmac-md5' +# server: '192.168.1.2' + +# # Customize DNS server security options +#named_public_recursion: 'no' +#named_private_recursion: 'yes' + + +# NOTE(shadower): Do not change this value. The Ansible user is currently +# hardcoded to `openshift`. +ansible_user: openshift + +# # Use a single security group for a cluster (default: false) +#openstack_flat_secgrp: false + +# # Openstack inventory type and cluster nodes access pattern +# # Defaults to 'static'. +# # Use 'dynamic' to access cluster nodes directly, via floating IPs +# # and given a dynamic inventory script, like openstack.py +#openstack_inventory: static +# # The path to checkpoint the static inventory from the in-memory one +#openstack_inventory_path: ../../../../inventory + +# # Use bastion node to access cluster nodes (Defaults to False). +# # Requires a static inventory. +#openstack_use_bastion: False +#bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24" +# +# # The Nova key-pair's private SSH key to access inventory nodes +#openstack_private_ssh_key: ~/.ssh/openshift +# # The path for the SSH config to access all nodes +#openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.{{ env_id }}.{{ public_dns_domain }} + + +# If you want to use the VM storage instead of Cinder volumes, set this to `true`. +# NOTE: this is for testing only! Your data will be gone once the VM disappears! 
+# ephemeral_volumes: false + +# # OpenShift node labels +# # - in order to customise node labels for app and/or infra group, set the +# # openshift_cluster_node_labels variable +#openshift_cluster_node_labels: +# app: +# region: primary +# infra: +# region: infra diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py new file mode 100755 index 000000000..6a1b74b3d --- /dev/null +++ b/playbooks/openstack/sample-inventory/inventory.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python + +from __future__ import print_function + +import json + +import shade + + +if __name__ == '__main__': + cloud = shade.openstack_cloud() + + inventory = {} + + # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER` + # environment variable. + cluster_hosts = [ + server for server in cloud.list_servers() + if 'metadata' in server and 'clusterid' in server.metadata] + + masters = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'master'] + + etcd = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'etcd'] + if not etcd: + etcd = masters + + infra_hosts = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'node' and + server.metadata['sub-host-type'] == 'infra'] + + app = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'node' and + server.metadata['sub-host-type'] == 'app'] + + nodes = list(set(masters + infra_hosts + app)) + + dns = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'dns'] + + lb = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'lb'] + + osev3 = list(set(nodes + etcd + lb)) + + groups = [server.metadata.group for server in cluster_hosts + if 'group' in server.metadata] + + inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]} + inventory['OSEv3'] = {'hosts': osev3} + inventory['masters'] = {'hosts': masters} + inventory['etcd'] = {'hosts': etcd} + inventory['nodes'] = {'hosts': nodes} + inventory['infra_hosts'] = {'hosts': infra_hosts} + inventory['app'] = {'hosts': app} + inventory['dns'] = {'hosts': dns} + inventory['lb'] = {'hosts': lb} + + for server in cluster_hosts: + if 'group' in server.metadata: + group = server.metadata.group + if group not in inventory: + inventory[group] = {'hosts': []} + inventory[group]['hosts'].append(server.name) + + inventory['_meta'] = {'hostvars': {}} + + for server in cluster_hosts: + ssh_ip_address = server.public_v4 or server.private_v4 + vars = { + 'ansible_host': ssh_ip_address + } + + public_v4 = server.public_v4 or server.private_v4 + if public_v4: + vars['public_v4'] = public_v4 + # TODO(shadower): what about multiple networks? + if server.private_v4: + vars['private_v4'] = server.private_v4 + + node_labels = server.metadata.get('node_labels') + if node_labels: + vars['openshift_node_labels'] = node_labels + + inventory['_meta']['hostvars'][server.name] = vars + + print(json.dumps(inventory, indent=4, sort_keys=True)) diff --git a/playbooks/provisioning/openstack/README.md b/playbooks/provisioning/openstack/README.md deleted file mode 100644 index a2f553f4c..000000000 --- a/playbooks/provisioning/openstack/README.md +++ /dev/null @@ -1,258 +0,0 @@ -# OpenStack Provisioning - -This directory contains [Ansible][ansible] playbooks and roles to create -OpenStack resources (servers, networking, volumes, security groups, -etc.). 
The result is an environment ready for OpenShift installation -via [openshift-ansible]. - -We provide everything necessary to be able to install OpenShift on -OpenStack (including the DNS and load balancer servers when -necessary). In addition, we work on providing integration with the -OpenStack-native services (storage, lbaas, baremetal as a service, -dns, etc.). - - -## OpenStack Requirements - -Before you start the installation, you need to have an OpenStack -environment to connect to. You can use a public cloud or an OpenStack -within your organisation. It is also possible to -use [Devstack][devstack] or [TripleO][tripleo]. In the case of -TripleO, we will be running on top of the **overcloud**. - -The OpenStack release must be Newton (for Red Hat OpenStack this is -version 10) or newer. It must also satisfy these requirements: - -* Heat (Orchestration) must be available -* The deployment image (CentOS 7 or RHEL 7) must be loaded -* The deployment flavor must be available to your user - - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing - - look at - the [Minimum Hardware Requirements page][hardware-requirements] - for production -* The keypair for SSH must be available in openstack -* `keystonerc` file that lets you talk to the openstack services - * NOTE: only Keystone V2 is currently supported - -Optional: -* External Neutron network with a floating IP address pool - - -## Installation - -There are four main parts to the installation: - -1. [Preparing Ansible and dependencies](#1-preparing-ansible-and-dependencies) -2. [Configuring the desired OpenStack environment and OpenShift cluster](#2-configuring-the-openstack-environment-and-openshift-cluster) -3. [Creating the OpenStack resources (VMs, networking, etc.)](#3-creating-the-openstack-resources-vms-networking-etc) -4. [Installing OpenShift](#4-installing-openshift) - -This guide is going to install [OpenShift Origin][origin] -with [CentOS 7][centos7] images with minimal customisation. - -We will create the VMs for running OpenShift, in a new Neutron -network, assign Floating IP addresses and configure DNS. - -The OpenShift cluster will have a single Master node that will run -`etcd`, a single Infra node and two App nodes. - -You can look at -the [Advanced Configuration page][advanced-configuration] for -additional options. - - - -### 1. Preparing Ansible and dependencies - -First, you need to select where to run [Ansible][ansible] from (the -*Ansible host*). This can be the computer you read this guide on or an -OpenStack VM you'll create specifically for this purpose. - -We will use -a -[Docker image that has all the dependencies installed][control-host-image] to -make things easier. If you don't want to use Docker, take a look at -the [Ansible host dependencies][ansible-dependencies] and make sure -they're installed. - -Your *Ansible host* needs to have the following: - -1. Docker -2. `keystonerc` file with your OpenStack credentials -3. SSH private key for logging in to your OpenShift nodes - -Assuming your private key is `~/.ssh/id_rsa` and `keystonerc` in your -current directory: - -```bash -$ sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \ - -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \ - redhatcop/control-host-openstack bash -``` - -This will create the container, add your SSH key and source your -`keystonerc`. It should be set up for the installation. - -You can verify that everything is in order: - - -```bash -$ less .ssh/id_rsa -$ ansible --version -$ openstack image list -``` - - -### 2. 
Configuring the OpenStack Environment and OpenShift Cluster - -The configuration is all done in an Ansible inventory directory. We -will clone the [openshift-ansible-contrib][contrib] repository and set -things up for a minimal installation. - - -``` -$ git clone https://github.com/openshift/openshift-ansible-contrib -$ cp -r openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/ inventory -``` - -If you're testing multiple configurations, you can have multiple -inventories and switch between them. - -#### OpenStack Configuration - -The OpenStack configuration is in `inventory/group_vars/all.yml`. - -Open the file and plug in the image, flavor and network configuration -corresponding to your OpenStack installation. - -```bash -$ vi inventory/group_vars/all.yml -``` - -1. Set the `openstack_ssh_public_key` to your OpenStack keypair name. - - See `openstack keypair list` to find the keypairs registered with - OpenShift. - - This must correspond to your private SSH key in `~/.ssh/id_rsa` -2. Set the `openstack_external_network_name` to the floating IP - network of your openstack. - - See `openstack network list` for the list of networks. - - It's often called `public`, `external` or `ext-net`. -3. Set the `openstack_default_image_name` to the image you want your - OpenShift VMs to run. - - See `openstack image list` for the list of available images. -4. Set the `openstack_default_flavor` to the flavor you want your - OpenShift VMs to use. - - See `openstack flavor list` for the list of available flavors. - -**NOTE**: In most OpenStack environments, you will also need to -configure the forwarders for the DNS server we create. This depends on -your environment. - -Launch a VM in your OpenStack and look at its `/etc/resolv.conf` and -put the IP addresses into `public_dns_nameservers` in -`inventory/group_vars/all.yml`. - - -#### OpenShift configuration - -The OpenShift configuration is in `inventory/group_vars/OSEv3.yml`. - -The default options will mostly work, but unless you used the large -flavors for a production-ready environment, openshift-ansible's -hardware check will fail. - -Let's disable those checks by putting this in -`inventory/group_vars/OSEv3.yml`: - -```yaml -openshift_disable_check: disk_availability,memory_availability -``` - -**NOTE**: The default authentication method will allow **any username -and password** in! If you're running this in a public place, you need -to set up access control. - -Feel free to look at -the [Sample OpenShift Inventory][sample-openshift-inventory] and -the [advanced configuration][advanced-configuration]. - - -### 3. Creating the OpenStack resources (VMs, networking, etc.) - -We will install the DNS server roles using ansible galaxy and then run -the openstack provisioning playbook. The `ansible.cfg` file we provide -has useful defaults -- copy it to the directory you're going to run -Ansible from. - -```bash -$ ansible-galaxy install -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml -p openshift-ansible-contrib/roles -$ cp openshift-ansible-contrib/playbooks/provisioning/openstack/ansible.cfg ansible.cfg -``` -(you will only need to do this once) - -Then run the provisioning playbook -- this will create the OpenStack -resources: - -```bash -$ ansible-playbook -i inventory openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml -``` - -If you're using multiple inventories, make sure you pass the path to -the right one to `-i`. - - -### 4. 
Installing OpenShift - -We will use the `openshift-ansible` project to install openshift on -top of the OpenStack nodes we have prepared: - -```bash -$ git clone https://github.com/openshift/openshift-ansible -$ ansible-playbook -i inventory openshift-ansible/playbooks/byo/config.yml -``` - - -### Next Steps - -And that's it! You should have a small but functional OpenShift -cluster now. - -Take a look at [how to access the cluster][accessing-openshift] -and [how to remove it][uninstall-openshift] as well as the more -advanced configuration: - -* [Accessing the OpenShift cluster][accessing-openshift] -* [Removing the OpenShift cluster][uninstall-openshift] -* Set Up Authentication (TODO) -* [Multiple Masters with a load balancer][loadbalancer] -* [External Dns][external-dns] -* Multiple Clusters (TODO) -* [Cinder Registry][cinder-registry] -* [Bastion Node][bastion] - - -[ansible]: https://www.ansible.com/ -[openshift-ansible]: https://github.com/openshift/openshift-ansible -[devstack]: https://docs.openstack.org/devstack/ -[tripleo]: http://tripleo.org/ -[ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node -[contrib]: https://github.com/openshift/openshift-ansible-contrib -[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/ -[hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware -[origin]: https://www.openshift.org/ -[centos7]: https://www.centos.org/ -[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example -[advanced-configuration]: ./advanced-configuration.md -[accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster -[uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster -[loadbalancer]: ./advanced-configuration.md#multi-master-configuration -[external-dns]: ./advanced-configuration.md#dns-configuration-variables -[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry -[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node - - - -## License - -Like the rest of the openshift-ansible-contrib repository, the code -here is licensed under Apache 2. diff --git a/playbooks/provisioning/openstack/advanced-configuration.md b/playbooks/provisioning/openstack/advanced-configuration.md deleted file mode 100644 index 72bb95254..000000000 --- a/playbooks/provisioning/openstack/advanced-configuration.md +++ /dev/null @@ -1,773 +0,0 @@ -## Dependencies for localhost (ansible control/admin node) - -* [Ansible 2.3](https://pypi.python.org/pypi/ansible) -* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps) -* [jinja2](http://jinja.pocoo.org/docs/2.9/) -* [shade](https://pypi.python.org/pypi/shade) -* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath) -* python-dns / [dnspython](https://pypi.python.org/pypi/dnspython) -* Become (sudo) is not required. - -**NOTE**: You can use a Docker image with all dependencies set up. -Find more in the [Deployment section](#deployment). - -### Optional Dependencies for localhost -**Note**: When using rhel images, `rhel-7-server-openstack-10-rpms` repository is required in order to install these packages. - -* `python-openstackclient` -* `python-heatclient` - -## Dependencies for OpenStack hosted cluster nodes (servers) - -There are no additional dependencies for the cluster nodes. 
Required -configuration steps are done by Heat given a specific user data config -that normally should not be changed. - -## Required galaxy modules - -In order to pull in external dependencies for DNS configuration steps, -the following commads need to be executed: - - ansible-galaxy install \ - -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \ - -p openshift-ansible-contrib/roles - -Alternatively you can install directly from github: - - ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \ - -p openshift-ansible-contrib/roles - -Notes: -* This assumes we're in the directory that contains the clonned -openshift-ansible-contrib repo in its root path. -* When trying to install a different version, the previous one must be removed first -(`infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)). -Otherwise, even if there are differences between the two versions, installation of the newer version is skipped. - - -## Accessing the OpenShift Cluster - -### Use the Cluster DNS - -In addition to the OpenShift nodes, we created a DNS server with all -the necessary entries. We will configure your *Ansible host* to use -this new DNS and talk to the deployed OpenShift. - -First, get the DNS IP address: - -```bash -$ openstack server show dns-0.openshift.example.com --format value --column addresses -openshift-ansible-openshift.example.com-net=192.168.99.11, 10.40.128.129 -``` - -Note the floating IP address (it's `10.40.128.129` in this case) -- if -you're not sure, try pinging them both -- it's the one that responds -to pings. - -Next, edit your `/etc/resolv.conf` as root and put `nameserver DNS_IP` as your -**first entry**. - -If your `/etc/resolv.conf` currently looks like this: - -``` -; generated by /usr/sbin/dhclient-script -search openstacklocal -nameserver 192.168.0.3 -nameserver 192.168.0.2 -``` - -Change it to this: - -``` -; generated by /usr/sbin/dhclient-script -search openstacklocal -nameserver 10.40.128.129 -nameserver 192.168.0.3 -nameserver 192.168.0.2 -``` - -### Get the `oc` Client - -**NOTE**: You can skip this section if you're using the Docker image --- it already has the `oc` binary. - -You need to download the OpenShift command line client (called `oc`). -You can download and extract `openshift-origin-client-tools` from the -OpenShift release page: - -https://github.com/openshift/origin/releases/latest/ - -Or you can now copy it from the master node: - - $ ansible -i inventory masters[0] -m fetch -a "src=/bin/oc dest=oc" - -Either way, find the `oc` binary and put it in your `PATH`. - - -### Logging in Using the Command Line - - -``` -oc login --insecure-skip-tls-verify=true https://master-0.openshift.example.com:8443 -u user -p password -oc new-project test -oc new-app --template=cakephp-mysql-example -oc status -v -curl http://cakephp-mysql-example-test.apps.openshift.example.com -``` - -This will trigger an image build. You can run `oc logs -f -bc/cakephp-mysql-example` to follow its progress. 
- -Wait until the build has finished and both pods are deployed and running: - -``` -$ oc status -v -In project test on server https://master-0.openshift.example.com:8443 - -http://cakephp-mysql-example-test.apps.openshift.example.com (svc/cakephp-mysql-example) - dc/cakephp-mysql-example deploys istag/cakephp-mysql-example:latest <- - bc/cakephp-mysql-example source builds https://github.com/openshift/cakephp-ex.git on openshift/php:7.0 - deployment #1 deployed about a minute ago - 1 pod - -svc/mysql - 172.30.144.36:3306 - dc/mysql deploys openshift/mysql:5.7 - deployment #1 deployed 3 minutes ago - 1 pod - -Info: - * pod/cakephp-mysql-example-1-build has no liveness probe to verify pods are still running. - try: oc set probe pod/cakephp-mysql-example-1-build --liveness ... -View details with 'oc describe /' or list everything with 'oc get all'. - -``` - -You can now look at the deployed app using its route: - -``` -$ curl http://cakephp-mysql-example-test.apps.openshift.example.com -``` - -Its `title` should say: "Welcome to OpenShift". - - -### Accessing the UI - -You can also access the OpenShift cluster with a web browser by going to: - -https://master-0.openshift.example.com:8443 - -Note that for this to work, the OpenShift nodes must be accessible -from your computer and it's DNS configuration must use the cruster's -DNS. - - -## Removing the OpenShift Cluster - -Everything in the cluster is contained within a Heat stack. To -completely remove the cluster and all the related OpenStack resources, -run this command: - -```bash -openstack stack delete --wait --yes openshift.example.com -``` - - -## DNS configuration variables - -Pay special attention to the values in the first paragraph -- these -will depend on your OpenStack environment. - -Note that the provsisioning playbooks update the original Neutron subnet -created with the Heat stack to point to the configured DNS servers. -So the provisioned cluster nodes will start using those natively as -default nameservers. Technically, this allows to deploy OpenShift clusters -without dnsmasq proxies. - -The `env_id` and `public_dns_domain` will form the cluster's DNS domain all -your servers will be under. With the default values, this will be -`openshift.example.com`. For workloads, the default subdomain is 'apps'. -That sudomain can be set as well by the `openshift_app_domain` variable in -the inventory. - -The `openstack__hostname` is a set of variables used for customising -hostnames of servers with a given role. When such a variable stays commented, -default hostname (usually the role name) is used. - -The `public_dns_nameservers` is a list of DNS servers accessible from all -the created Nova servers. These will be serving as your DNS forwarders for -external FQDNs that do not belong to the cluster's DNS domain and its subdomains. -If you're unsure what to put in here, you can try the google or opendns servers, -but note that some organizations may be blocking them. - -The `openshift_use_dnsmasq` controls either dnsmasq is deployed or not. -By default, dnsmasq is deployed and comes as the hosts' /etc/resolv.conf file -first nameserver entry that points to the local host instance of the dnsmasq -daemon that in turn proxies DNS requests to the authoritative DNS server. -When Network Manager is enabled for provisioned cluster nodes, which is -normally the case, you should not change the defaults and always deploy dnsmasq. 
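Taken together, a minimal sketch of how these DNS-related variables might look in `inventory/group_vars/all.yml` (all values here are illustrative placeholders, not recommendations):

    env_id: "openshift"
    public_dns_domain: "example.com"        # cluster DNS domain becomes openshift.example.com
    public_dns_nameservers: ["8.8.8.8"]     # forwarders reachable from the created Nova servers
    #openshift_app_domain: "apps"           # workloads subdomain, defaults to 'apps'
    #openstack_master_hostname: "master"    # per-role hostname override; default is the role name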
- -`external_nsupdate_keys` describes an external authoritative DNS server(s) -processing dynamic records updates in the public and private cluster views: - - external_nsupdate_keys: - public: - key_secret: - key_algorithm: 'hmac-md5' - key_name: 'update-key' - server: - private: - key_secret: - key_algorithm: 'hmac-sha256' - server: - -Here, for the public view section, we specified another key algorithm and -optional `key_name`, which normally defaults to the cluster's DNS domain. -This just illustrates a compatibility mode with a DNS service deployed -by OpenShift on OSP10 reference architecture, and used in a mixed mode with -another external DNS server. - -Another example defines an external DNS server for the public view -additionally to the in-stack DNS server used for the private view only: - - external_nsupdate_keys: - public: - key_secret: - key_algorithm: 'hmac-sha256' - server: - -Here, updates matching the public view will be hitting the given public -server IP. While updates matching the private view will be sent to the -auto evaluated in-stack DNS server's **public** IP. - -Note, for the in-stack DNS server, private view updates may be sent only -via the public IP of the server. You can not send updates via the private -IP yet. This forces the in-stack private server to have a floating IP. -See also the [security notes](#security-notes) - -## Flannel networking - -In order to configure the -[flannel networking](https://docs.openshift.com/container-platform/3.6/install_config/configuring_sdn.html#using-flannel), -uncomment and adjust the appropriate `inventory/group_vars/OSEv3.yml` group vars. -Note that the `osm_cluster_network_cidr` must not overlap with the default -Docker bridge subnet of 172.17.0.0/16. Or you should change the docker0 default -CIDR range otherwise. For example, by adding `--bip=192.168.2.1/24` to -`DOCKER_NETWORK_OPTIONS` located in `/etc/sysconfig/docker-network`. - -Also note that the flannel network will be provisioned on a separate isolated Neutron -subnet defined from `osm_cluster_network_cidr` and having ports security disabled. -Use the `openstack_private_data_network_name` variable to define the network -name for the heat stack resource. - -After the cluster deployment done, you should run an additional post installation -step for flannel and docker iptables configuration: - - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-install.yml - -## Other configuration variables - -`openstack_ssh_public_key` is a Nova keypair - you can see your -keypairs with `openstack keypair list`. It must correspond to the -private SSH key Ansible will use to log into the created VMs. This is -`~/.ssh/id_rsa` by default, but you can use a different key by passing -`--private-key` to `ansible-playbook`. - -`openstack_default_image_name` is the default name of the Glance image the -servers will use. You can see your images with `openstack image list`. -In order to set a different image for a role, uncomment the line with the -corresponding variable (e.g. `openstack_lb_image_name` for load balancer) and -set its value to another available image name. `openstack_default_image_name` -must stay defined as it is used as a default value for the rest of the roles. - -`openstack_default_flavor` is the default Nova flavor the servers will use. -You can see your flavors with `openstack flavor list`. -In order to set a different flavor for a role, uncomment the line with the -corresponding variable (e.g. 
`openstack_lb_flavor` for load balancer) and -set its value to another available flavor. `openstack_default_flavor` must -stay defined as it is used as a default value for the rest of the roles. - -`openstack_external_network_name` is the name of the Neutron network -providing external connectivity. It is often called `public`, -`external` or `ext-net`. You can see your networks with `openstack -network list`. - -`openstack_private_network_name` is the name of the private Neutron network -providing admin/control access for ansible. It can be merged with other -cluster networks, there are no special requirements for networking. - -The `openstack_num_masters`, `openstack_num_infra` and -`openstack_num_nodes` values specify the number of Master, Infra and -App nodes to create. - -The `openshift_cluster_node_labels` defines custom labels for your openshift -cluster node groups. It currently supports app and infra node groups. -The default value of this variable sets `region: primary` to app nodes and -`region: infra` to infra nodes. -An example of setting a customised label: -``` -openshift_cluster_node_labels: - app: - mylabel: myvalue -``` - -The `openstack_nodes_to_remove` allows you to specify the numerical indexes -of App nodes that should be removed; for example, ['0', '2'], - -The `docker_volume_size` is the default Docker volume size the servers will use. -In order to set a different volume size for a role, -uncomment the line with the corresponding variable (e. g. `docker_master_volume_size` -for master) and change its value. `docker_volume_size` must stay defined as it is -used as a default value for some of the servers (master, infra, app node). -The rest of the roles (etcd, load balancer, dns) have their defaults hard-coded. - -**Note**: If the `ephemeral_volumes` is set to `true`, the `*_volume_size` variables -will be ignored and the deployment will not create any cinder volumes. - -The `openstack_flat_secgrp`, controls Neutron security groups creation for Heat -stacks. Set it to true, if you experience issues with sec group rules -quotas. It trades security for number of rules, by sharing the same set -of firewall rules for master, node, etcd and infra nodes. - -The `required_packages` variable also provides a list of the additional -prerequisite packages to be installed before to deploy an OpenShift cluster. -Those are ignored though, if the `manage_packages: False`. - -The `openstack_inventory` controls either a static inventory will be created after the -cluster nodes provisioned on OpenStack cloud. Note, the fully dynamic inventory -is yet to be supported, so the static inventory will be created anyway. - -The `openstack_inventory_path` points the directory to host the generated static inventory. -It should point to the copied example inventory directory, otherwise ti creates -a new one for you. - -## Multi-master configuration - -Please refer to the official documentation for the -[multi-master setup](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#multiple-masters) -and define the corresponding [inventory -variables](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#configuring-cluster-variables) -in `inventory/group_vars/OSEv3.yml`. 
For example, given a load balancer node -under the ansible group named `ext_lb`: - - openshift_master_cluster_method: native - openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}" - openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}" - -## Provider Network - -Normally, the playbooks create a new Neutron network and subnet and attach -floating IP addresses to each node. If you have a provider network set up, this -is all unnecessary as you can just access servers that are placed in the -provider network directly. - -To use a provider network, set its name in `openstack_provider_network_name` in -`inventory/group_vars/all.yml`. - -If you set the provider network name, the `openstack_external_network_name` and -`openstack_private_network_name` fields will be ignored. - -**NOTE**: this will not update the nodes' DNS, so running openshift-ansible -right after provisioning will fail (unless you're using an external DNS server -your provider network knows about). You must make sure your nodes are able to -resolve each other by name. - -## Security notes - -Configure required `*_ingress_cidr` variables to restrict public access -to provisioned servers from your laptop (a /32 notation should be used) -or your trusted network. The most important is the `node_ingress_cidr` -that restricts public access to the deployed DNS server and cluster -nodes' ephemeral ports range. - -Note, the command ``curl https://api.ipify.org`` helps fiding an external -IP address of your box (the ansible admin node). - -There is also the `manage_packages` variable (defaults to True) you -may want to turn off in order to speed up the provisioning tasks. This may -be the case for development environments. When turned off, the servers will -be provisioned omitting the ``yum update`` command. This brings security -implications though, and is not recommended for production deployments. - -### DNS servers security options - -Aside from `node_ingress_cidr` restricting public access to in-stack DNS -servers, there are following (bind/named specific) DNS security -options available: - - named_public_recursion: 'no' - named_private_recursion: 'yes' - -External DNS servers, which is not included in the 'dns' hosts group, -are not managed. It is up to you to configure such ones. - -## Configure the OpenShift parameters - -Finally, you need to update the DNS entry in -`inventory/group_vars/OSEv3.yml` (look at -`openshift_master_default_subdomain`). - -In addition, this is the place where you can customise your OpenShift -installation for example by specifying the authentication. - -The full list of options is available in this sample inventory: - -https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example - -Note, that in order to deploy OpenShift origin, you should update the following -variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`: - - deployment_type: origin - openshift_deployment_type: "{{ deployment_type }}" - - -## Setting a custom entrypoint - -In order to set a custom entrypoint, update `openshift_master_cluster_public_hostname` - - openshift_master_cluster_public_hostname: api.openshift.example.com - -Note than an empty hostname does not work, so if your domain is `openshift.example.com`, -you cannot set this value to simply `openshift.example.com`. - -## Creating and using a Cinder volume for the OpenShift registry - -You can optionally have the playbooks create a Cinder volume and set -it up as the OpenShift hosted registry. 
- -To do that you need specify the desired Cinder volume name and size in -Gigabytes in `inventory/group_vars/all.yml`: - - cinder_hosted_registry_name: cinder-registry - cinder_hosted_registry_size_gb: 10 - -With this, the playbooks will create the volume and set up its -filesystem. If there is an existing volume of the same name, we will -use it but keep the existing data on it. - -To use the volume for the registry, you must first configure it with -the OpenStack credentials by putting the following to `OSEv3.yml`: - - openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}" - openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" - openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" - openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" - -This will use the credentials from your shell environment. If you want -to enter them explicitly, you can. You can also use credentials -different from the provisioning ones (say for quota or access control -reasons). - -**NOTE**: If you're testing this on (DevStack)[devstack], you must -explicitly set your Keystone API version to v2 (e.g. -`OS_AUTH_URL=http://10.34.37.47/identity/v2.0`) instead of the default -value provided by `openrc`. You may also encounter the following issue -with Cinder: - -https://github.com/kubernetes/kubernetes/issues/50461 - -You can read the (OpenShift documentation on configuring -OpenStack)[openstack] for more information. - -[devstack]: https://docs.openstack.org/devstack/latest/ -[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html - - -Next, we need to instruct OpenShift to use the Cinder volume for it's -registry. Again in `OSEv3.yml`: - - #openshift_hosted_registry_storage_kind: openstack - #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] - #openshift_hosted_registry_storage_openstack_filesystem: xfs - -The filesystem value here will be used in the initial formatting of -the volume. - -If you're using the dynamic inventory, you must uncomment these two values as -well: - - #openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}" - #openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi" - -But note that they use the `os_cinder` lookup plugin we provide, so you must -tell Ansible where to find it either in `ansible.cfg` (the one we provide is -configured properly) or by exporting the -`ANSIBLE_LOOKUP_PLUGINS=openshift-ansible-contrib/lookup_plugins` environment -variable. - - - -## Use an existing Cinder volume for the OpenShift registry - -You can also use a pre-existing Cinder volume for the storage of your -OpenShift registry. - -To do that, you need to have a Cinder volume. You can create one by -running: - - openstack volume create --size - -The volume needs to have a file system created before you put it to -use. 
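If you prefer to create that volume with Ansible rather than the CLI, a minimal sketch using the same shade-backed `os_*` module family these playbooks already rely on could look like this (the volume name and size are placeholders):

    ---
    # Illustrative only: create an empty Cinder volume for the registry.
    # Assumes the usual OpenStack credentials (keystonerc) are sourced.
    - hosts: localhost
      gather_facts: false
      become: false
      tasks:
        - name: Create the registry volume
          os_volume:
            state: present
            display_name: registry-volume   # placeholder name
            size: 10                        # size in GB

Either way, note the new volume's UUID (for example from `openstack volume show`); the registry variables below expect the UUID, not the volume name.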
-
-As with the automatically-created volume, you have to set up the
-OpenStack credentials in `inventory/group_vars/OSEv3.yml` as well as
-registry values:
-
-    #openshift_hosted_registry_storage_kind: openstack
-    #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
-    #openshift_hosted_registry_storage_openstack_filesystem: xfs
-    #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05
-    #openshift_hosted_registry_storage_volume_size: 10Gi
-
-Note the `openshift_hosted_registry_storage_openstack_volumeID` and
-`openshift_hosted_registry_storage_volume_size` values: these need to
-be added in addition to the previous variables.
-
-The **Cinder volume ID**, **filesystem** and **volume size** variables
-must correspond to the values in your volume. The volume ID must be
-the **UUID** of the Cinder volume, *not its name*.
-
-We can format the volume for you if you ask for it in
-`inventory/group_vars/all.yml`:
-
-    prepare_and_format_registry_volume: true
-
-**NOTE:** doing so **will destroy any data that's currently on the volume**!
-
-You can also run the registry setup playbook directly:
-
-    ansible-playbook -i inventory playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml
-
-(the provisioning phase must be completed first)
-
-
-
-## Configure static inventory and access via a bastion node
-
-Example inventory variables:
-
-    openstack_use_bastion: true
-    bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24"
-    openstack_private_ssh_key: ~/.ssh/id_rsa
-    openstack_inventory: static
-    openstack_inventory_path: ../../../../inventory
-    openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com
-
-The `openstack_subnet_prefix` is the prefix of the OpenStack private network for
-your cluster, and the `bastion_ingress_cidr` defines the accepted range for SSH
-connections to nodes in addition to the `ssh_ingress_cidr` (see the security notes above).
-
-The SSH config will be stored on the ansible control node at the
-given path. Ansible uses it automatically. To access the cluster nodes with
-that SSH config, use the `-F` option, e.g.:
-
-    ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK
-
-Note, relative paths will not work for the `openstack_ssh_config_path`, but they
-do work for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this
-guide, the latter points to the current directory, where you run ansible commands
-from.
-
-To verify node connectivity, use the command:
-
-    ansible -v -i inventory/hosts -m ping all
-
-If something is broken, double-check the inventory variables, paths, the
-generated static inventory `hosts` file and the `openstack_ssh_config_path` file.
-
-The `openstack_inventory: dynamic` setting can be used instead to access cluster nodes
-directly via floating IPs. In this mode you cannot use a bastion node and should
-specify the dynamic inventory file in your ansible commands, like `-i openstack.py`.
-
-## Using Docker on the Ansible host
-
-If you don't want to worry about the dependencies, you can use the
-[OpenStack Control Host image][control-host-image].
-
-[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/
-
-It has all the dependencies installed, but you'll need to map your
-code and credentials to it. Assuming your SSH keys live in `~/.ssh`
-and everything else is in your current directory (i.e.
`ansible.cfg`, -`keystonerc`, `inventory`, `openshift-ansible`, -`openshift-ansible-contrib`), this is how you run the deployment: - - sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \ - -v $PWD:/root/openshift:Z \ - -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \ - redhatcop/control-host-openstack bash - -(feel free to replace `$PWD` with an actual path to your inventory and -checkouts, but note that relative paths don't work) - -The first run may take a few minutes while the image is being -downloaded. After that, you'll be inside the container and you can run -the playbooks: - - cd openshift - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml - - -### Run the playbook - -Assuming your OpenStack (Keystone) credentials are in the `keystonerc` -this is how you stat the provisioning process from your ansible control node: - - . keystonerc - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml - -Note, here you start with an empty inventory. The static inventory will be populated -with data so you can omit providing additional arguments for future ansible commands. - -If bastion enabled, the generates SSH config must be applied for ansible. -Otherwise, it is auto included by the previous step. In order to execute it -as a separate playbook, use the following command: - - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml - -The first infra node then becomes a bastion node as well and proxies access -for future ansible commands. The post-provision step also configures Satellite, -if requested, and DNS server, and ensures other OpenShift requirements to be met. - - -## Running Custom Post-Provision Actions - -A custom playbook can be run like this: - -``` -ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -``` - -If you'd like to limit the run to one particular host, you can do so as follows: - -``` -ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -l app-node-0.openshift.example.com -``` - -You can also create your own custom playbook. Here are a few examples: - -### Adding additional YUM repositories - -``` ---- -- hosts: app - tasks: - - # enable EPL - - name: Add repository - yum_repository: - name: epel - description: EPEL YUM repo - baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/ -``` - -This example runs against app nodes. The list of options include: - - - cluster_hosts (all hosts: app, infra, masters, dns, lb) - - OSEv3 (app, infra, masters) - - app - - dns - - masters - - infra_hosts - -### Attaching additional RHN pools - -``` ---- -- hosts: cluster_hosts - tasks: - - name: Attach additional RHN pool - become: true - command: "/usr/bin/subscription-manager attach --pool=" - register: attach_rhn_pool_result - until: attach_rhn_pool_result.rc == 0 - retries: 10 - delay: 1 -``` - -This playbook runs against all cluster nodes. In order to help prevent slow connectivity -problems, the task is retried 10 times in case of initial failure. -Note that in order for this example to work in your deployment, your servers must use the RHEL image. 
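Like the other custom actions documented below, the ready-made `add-rhn-pools.yml` playbook from the custom-actions library takes its input as an extra variable; a usage sketch (the pool ID is a placeholder):

```
ansible-playbook --private-key ~/.ssh/openshift -i inventory/ \
  openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml \
  -e '{"rhn_pools": ["<pool-id>"]}'
```

Passing the list with `-e` is what overrides the playbook's empty `rhn_pools` default.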
- -### Adding extra Docker registry URLs - -This playbook is located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/tree/master/playbooks/provisioning/openstack/custom-actions) directory. - -It adds URLs passed as arguments to the docker configuration program. -Going into more detail, the configuration program (which is in the YAML format) is loaded into an ansible variable -([lines 27-30](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L27-L30)) -and in its structure, `registries` and `insecure_registries` sections are expanded with the newly added items -([lines 56-76](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L56-L76)). -The new content is then saved into the original file -([lines 78-82](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L78-L82)) -and docker is restarted. - -Example usage: -``` -ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml --extra-vars '{"registries": "reg1", "insecure_registries": ["ins_reg1","ins_reg2"]}' -``` - -### Adding extra CAs to the trust chain - -This playbook is also located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions) directory. -It copies passed CAs to the trust chain location and updates the trust chain on each selected host. - -Example usage: -``` -ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-cas.yml --extra-vars '{"ca_files": [, ]}' -``` - -Please consider contributing your custom playbook back to openshift-ansible-contrib! - -A library of custom post-provision actions exists in `openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions`. Playbooks include: - -* [add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml): adds a list of custom yum repositories to every node in the cluster -* [add-rhn-pools.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml): attaches a list of additional RHN pools to every node in the cluster -* [add-docker-registry.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml): adds a list of docker registries to the docker configuration on every node in the cluster -* [add-cas.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml): adds a list of CAs to the trust chain on every node in the cluster - - -## Install OpenShift - -Once it succeeds, you can install openshift by running: - - ansible-playbook openshift-ansible/playbooks/byo/config.yml - -## Access UI - -OpenShift UI may be accessed via the 1st master node FQDN, port 8443. - -When using a bastion, you may want to make an SSH tunnel from your control node -to access UI on the `https://localhost:8443`, with this inventory variable: - - openshift_ui_ssh_tunnel: True - -Note, this requires sudo rights on the ansible control node and an absolute path -for the `openstack_private_ssh_key`. 
You should also update the control node's -`/etc/hosts`: - - 127.0.0.1 master-0.openshift.example.com - -In order to access UI, the ssh-tunnel service will be created and started on the -control node. Make sure to remove these changes and the service manually, when not -needed anymore. - -## Scale Deployment up/down - -### Scaling up - -One can scale up the number of application nodes by executing the ansible playbook -`openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml`. -This process can be done even if there is currently no deployment available. -The `increment_by` variable is used to specify by how much the deployment should -be scaled up (if none exists, it serves as a target number of application nodes). -The path to `openshift-ansible` directory can be customised by the `openshift_ansible_dir` -variable. Its value must be an absolute path to `openshift-ansible` and it cannot -contain the '/' symbol at the end. - -Usage: - -``` -ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=] [-e openshift_ansible_dir=] -``` - -Note: This playbook works only without a bastion node (`openstack_use_bastion: False`). diff --git a/playbooks/provisioning/openstack/ansible.cfg b/playbooks/provisioning/openstack/ansible.cfg deleted file mode 100644 index a21f023ea..000000000 --- a/playbooks/provisioning/openstack/ansible.cfg +++ /dev/null @@ -1,24 +0,0 @@ -# config file for ansible -- http://ansible.com/ -# ============================================== -[defaults] -ansible_user = openshift -forks = 50 -# work around privilege escalation timeouts in ansible -timeout = 30 -host_key_checking = false -inventory = inventory -inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt -gathering = smart -retry_files_enabled = false -fact_caching = jsonfile -fact_caching_connection = .ansible/cached_facts -fact_caching_timeout = 900 -stdout_callback = skippy -callback_whitelist = profile_tasks -lookup_plugins = openshift-ansible-contrib/lookup_plugins - - -[ssh_connection] -ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no -control_path = /var/tmp/%%h-%%r -pipelining = True diff --git a/playbooks/provisioning/openstack/custom-actions/add-cas.yml b/playbooks/provisioning/openstack/custom-actions/add-cas.yml deleted file mode 100644 index b2c195f91..000000000 --- a/playbooks/provisioning/openstack/custom-actions/add-cas.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- hosts: cluster_hosts - become: true - vars: - ca_files: [] - tasks: - - name: Copy CAs to the trusted CAs location - with_items: "{{ ca_files }}" - copy: - src: "{{ item }}" - dest: /etc/pki/ca-trust/source/anchors/ - - name: Update trusted CAs - shell: 'update-ca-trust enable && update-ca-trust extract' diff --git a/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml b/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml deleted file mode 100644 index e118a71dc..000000000 --- a/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -- hosts: OSEv3 - become: true - vars: - registries: [] - insecure_registries: [] - - tasks: - - name: Check if docker is even installed - command: docker - - - name: Install atomic-registries package - yum: - name: atomic-registries - state: latest - - - name: Get registry configuration file - register: file_result - stat: - path: /etc/containers/registries.conf - - - name: Check if it exists - assert: - that: 
'file_result.stat.exists' - msg: "Configuration file does not exist." - - - name: Load configuration file - shell: cat /etc/containers/registries.conf - register: file_content - - - name: Store file content into a variable - set_fact: - docker_conf: "{{ file_content.stdout | from_yaml }}" - - - name: Make sure that docker file content is a dictionary - when: '(docker_conf is string) and (not docker_conf)' - set_fact: - docker_conf: {} - - - name: Make sure that registries is a list - when: 'registries is string' - set_fact: - registries_list: [ "{{ registries }}" ] - - - name: Make sure that insecure_registries is a list - when: 'insecure_registries is string' - set_fact: - insecure_registries_list: [ "{{ insecure_registries }}" ] - - - name: Set default values if there are no registries defined - set_fact: - docker_conf_registries: "{{ [] if docker_conf['registries'] is not defined else docker_conf['registries'] }}" - docker_conf_insecure_registries: "{{ [] if docker_conf['insecure_registries'] is not defined else docker_conf['insecure_registries'] }}" - - - name: Add other registries - when: 'registries_list is not defined' - register: registries_merge_result - set_fact: - docker_conf: "{{ docker_conf | combine({'registries': (docker_conf_registries + registries) | unique}, recursive=True) }}" - - - name: Add other registries (if registries had to be converted) - when: 'registries_merge_result|skipped' - set_fact: - docker_conf: "{{ docker_conf | combine({'registries': (docker_conf_registries + registries_list) | unique}, recursive=True) }}" - - - name: Add insecure registries - when: 'insecure_registries_list is not defined' - register: insecure_registries_merge_result - set_fact: - docker_conf: "{{ docker_conf | combine({'insecure_registries': (docker_conf_insecure_registries + insecure_registries) | unique }, recursive=True) }}" - - - name: Add insecure registries (if insecure_registries had to be converted) - when: 'insecure_registries_merge_result|skipped' - set_fact: - docker_conf: "{{ docker_conf | combine({'insecure_registries': (docker_conf_insecure_registries + insecure_registries_list) | unique }, recursive=True) }}" - - - name: Load variable back to file - copy: - content: "{{ docker_conf | to_yaml }}" - dest: /etc/containers/registries.conf - - - name: Restart registries service - service: - name: registries - state: restarted - - - name: Restart docker - service: - name: docker - state: restarted diff --git a/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml b/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml deleted file mode 100644 index d17c1e335..000000000 --- a/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- hosts: cluster_hosts - vars: - rhn_pools: [] - tasks: - - name: Attach additional RHN pools - become: true - with_items: "{{ rhn_pools }}" - command: "/usr/bin/subscription-manager attach --pool={{ item }}" - register: attach_rhn_pools_result - until: attach_rhn_pools_result.rc == 0 - retries: 10 - delay: 1 diff --git a/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml b/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml deleted file mode 100644 index ffebcb642..000000000 --- a/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- hosts: cluster_hosts - vars: - yum_repos: [] - tasks: - # enable additional yum repos - - name: Add repository - yum_repository: - name: "{{ item.name }}" - description: 
"{{ item.description }}" - baseurl: "{{ item.baseurl }}" - with_items: "{{ yum_repos }}" diff --git a/playbooks/provisioning/openstack/custom_flavor_check.yaml b/playbooks/provisioning/openstack/custom_flavor_check.yaml deleted file mode 100644 index e11874c28..000000000 --- a/playbooks/provisioning/openstack/custom_flavor_check.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Try to get flavor facts - os_flavor_facts: - name: "{{ flavor }}" - register: flavor_result -- name: Check that custom flavor is available - assert: - that: "flavor_result.ansible_facts.openstack_flavors" - msg: "Flavor {{ flavor }} is not available." diff --git a/playbooks/provisioning/openstack/custom_image_check.yaml b/playbooks/provisioning/openstack/custom_image_check.yaml deleted file mode 100644 index 452e1e4d8..000000000 --- a/playbooks/provisioning/openstack/custom_image_check.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Try to get image facts - os_image_facts: - image: "{{ image }}" - register: image_result -- name: Check that custom image is available - assert: - that: "image_result.ansible_facts.openstack_image" - msg: "Image {{ image }} is not available." diff --git a/playbooks/provisioning/openstack/galaxy-requirements.yaml b/playbooks/provisioning/openstack/galaxy-requirements.yaml deleted file mode 100644 index 1d745dcc3..000000000 --- a/playbooks/provisioning/openstack/galaxy-requirements.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# This is the Ansible Galaxy requirements file to pull in the correct roles - -# From 'infra-ansible' -- src: https://github.com/redhat-cop/infra-ansible - version: master - -# From 'openshift-ansible' -- src: https://github.com/openshift/openshift-ansible - version: master diff --git a/playbooks/provisioning/openstack/net_vars_check.yaml b/playbooks/provisioning/openstack/net_vars_check.yaml deleted file mode 100644 index 68afde415..000000000 --- a/playbooks/provisioning/openstack/net_vars_check.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Check the provider network configuration - fail: - msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network" - when: - - openstack_provider_network_name is defined - - openstack_private_data_network_name is defined - -- name: Check the flannel network configuration - fail: - msg: "A dedicated containers data network is only supported with Flannel SDN" - when: - - openstack_private_data_network_name is defined - - not openshift_use_flannel|default(False)|bool diff --git a/playbooks/provisioning/openstack/post-install.yml b/playbooks/provisioning/openstack/post-install.yml deleted file mode 100644 index 417813e2a..000000000 --- a/playbooks/provisioning/openstack/post-install.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- hosts: OSEv3 - gather_facts: False - become: True - tasks: - - name: Save iptables rules to a backup file - when: openshift_use_flannel|default(False)|bool - shell: iptables-save > /etc/sysconfig/iptables.orig-$(date +%Y%m%d%H%M%S) - -# Enable iptables service on app nodes to persist custom rules (flannel SDN) -# FIXME(bogdando) w/a https://bugzilla.redhat.com/show_bug.cgi?id=1490820 -- hosts: app - gather_facts: False - become: True - vars: - os_firewall_allow: - - service: dnsmasq tcp - port: 53/tcp - - service: dnsmasq udp - port: 53/udp - tasks: - - when: openshift_use_flannel|default(False)|bool - block: - - include_role: - name: openshift-ansible/roles/os_firewall - - include_role: - name: openshift-ansible/roles/lib_os_firewall - - name: set allow rules for dnsmasq 
- os_firewall_manage_iptables: - name: "{{ item.service }}" - action: add - protocol: "{{ item.port.split('/')[1] }}" - port: "{{ item.port.split('/')[0] }}" - with_items: "{{ os_firewall_allow }}" - -- hosts: OSEv3 - gather_facts: False - become: True - tasks: - - name: Apply post-install iptables hacks for Flannel SDN (the best effort) - when: openshift_use_flannel|default(False)|bool - block: - - name: set allow/masquerade rules for for flannel/docker - shell: >- - (iptables-save | grep -q custom-flannel-docker-1) || - iptables -A DOCKER -w - -p all -j ACCEPT - -m comment --comment "custom-flannel-docker-1"; - (iptables-save | grep -q custom-flannel-docker-2) || - iptables -t nat -A POSTROUTING -w - -o {{flannel_interface|default('eth1')}} - -m comment --comment "custom-flannel-docker-2" - -j MASQUERADE - - # NOTE(bogdando) the rules will not be restored, when iptables service unit is disabled & masked - - name: Persist in-memory iptables rules (w/o dynamic KUBE rules) - shell: iptables-save | grep -v KUBE > /etc/sysconfig/iptables diff --git a/playbooks/provisioning/openstack/post-provision-openstack.yml b/playbooks/provisioning/openstack/post-provision-openstack.yml deleted file mode 100644 index e460fbf12..000000000 --- a/playbooks/provisioning/openstack/post-provision-openstack.yml +++ /dev/null @@ -1,118 +0,0 @@ ---- -- hosts: cluster_hosts - name: Wait for the the nodes to come up - become: False - gather_facts: False - tasks: - - when: not openstack_use_bastion|default(False)|bool - wait_for_connection: - - when: openstack_use_bastion|default(False)|bool - delegate_to: bastion - wait_for_connection: - -- hosts: cluster_hosts - gather_facts: True - tasks: - - name: Debug hostvar - debug: - msg: "{{ hostvars[inventory_hostname] }}" - verbosity: 2 - -- name: OpenShift Pre-Requisites (part 1) - include: pre-install.yml - -- name: Assign hostnames - hosts: cluster_hosts - gather_facts: False - become: true - roles: - - role: hostnames - -- name: Subscribe DNS Host to allow for configuration below - hosts: dns - gather_facts: False - become: true - roles: - - role: subscription-manager - when: hostvars.localhost.rhsm_register|default(False) - tags: 'subscription-manager' - -- name: Determine which DNS server(s) to use for our generated records - hosts: localhost - gather_facts: False - become: False - roles: - - dns-server-detect - -- name: Build the DNS Server Views and Configure DNS Server(s) - hosts: dns - gather_facts: False - become: true - roles: - - role: dns-views - - role: infra-ansible/roles/dns-server - -- name: Build and process DNS Records - hosts: localhost - gather_facts: True - become: False - roles: - - role: dns-records - use_bastion: "{{ openstack_use_bastion|default(False)|bool }}" - - role: infra-ansible/roles/dns - -- name: Switch the stack subnet to the configured private DNS server - hosts: localhost - gather_facts: False - become: False - vars_files: - - stack_params.yaml - tasks: - - include_role: - name: openstack-stack - tasks_from: subnet_update_dns_servers - -- name: OpenShift Pre-Requisites (part 2) - hosts: OSEv3 - gather_facts: true - become: true - vars: - interface: "{{ flannel_interface|default('eth1') }}" - interface_file: /etc/sysconfig/network-scripts/ifcfg-{{ interface }} - interface_config: - DEVICE: "{{ interface }}" - TYPE: Ethernet - BOOTPROTO: dhcp - ONBOOT: 'yes' - DEFTROUTE: 'no' - PEERDNS: 'no' - pre_tasks: - - name: "Include DNS configuration to ensure proper name resolution" - lineinfile: - state: present - dest: /etc/sysconfig/network 
- regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" - line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" - - name: "Configure the flannel interface options" - when: openshift_use_flannel|default(False)|bool - block: - - file: - dest: "{{ interface_file }}" - state: touch - mode: 0644 - owner: root - group: root - - lineinfile: - state: present - dest: "{{ interface_file }}" - regexp: "{{ item.key }}=" - line: "{{ item.key }}={{ item.value }}" - with_dict: "{{ interface_config }}" - roles: - - node-network-manager - -- include: prepare-and-format-cinder-volume.yaml - when: > - prepare_and_format_registry_volume|default(False) or - (cinder_registry_volume is defined and - cinder_registry_volume.changed|default(False)) diff --git a/playbooks/provisioning/openstack/pre-install.yml b/playbooks/provisioning/openstack/pre-install.yml deleted file mode 100644 index 45e9005cc..000000000 --- a/playbooks/provisioning/openstack/pre-install.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -############################### -# OpenShift Pre-Requisites - -# - subscribe hosts -# - prepare docker -# - other prep (install additional packages, etc.) -# -- hosts: OSEv3 - become: true - roles: - - { role: subscription-manager, when: hostvars.localhost.rhsm_register|default(False), tags: 'subscription-manager', ansible_sudo: true } - - { role: docker, tags: 'docker' } - - { role: openshift-prep, tags: 'openshift-prep' } - -- hosts: localhost:cluster_hosts - become: False - tasks: - - include: pre_tasks.yml diff --git a/playbooks/provisioning/openstack/pre_tasks.yml b/playbooks/provisioning/openstack/pre_tasks.yml deleted file mode 100644 index 11fe2dd84..000000000 --- a/playbooks/provisioning/openstack/pre_tasks.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- name: Generate Environment ID - set_fact: - env_random_id: "{{ ansible_date_time.epoch }}" - run_once: true - delegate_to: localhost - -- name: Set default Environment ID - set_fact: - default_env_id: "openshift-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" - delegate_to: localhost - -- name: Setting Common Facts - set_fact: - env_id: "{{ env_id | default(default_env_id) }}" - delegate_to: localhost - -- name: Updating DNS domain to include env_id (if not empty) - set_fact: - full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' 
+ public_dns_domain) }}" - delegate_to: localhost - -- name: Set the APP domain for OpenShift use - set_fact: - openshift_app_domain: "{{ openshift_app_domain | default('apps') }}" - delegate_to: localhost - -- name: Set the default app domain for routing purposes - set_fact: - openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" - delegate_to: localhost - when: - - openshift_master_default_subdomain is undefined - -# Check that openshift_cluster_node_labels has regions defined for all groups -# NOTE(kpilatov): if node labels are to be enabled for more groups, -# this check needs to be modified as well -- name: Set openshift_cluster_node_labels if undefined (should not happen) - set_fact: - openshift_cluster_node_labels: {'app': {'region': 'primary'}, 'infra': {'region': 'infra'}} - when: openshift_cluster_node_labels is not defined - -- name: Set openshift_cluster_node_labels for the infra group - set_fact: - openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'infra': {'region': 'infra'}}, recursive=True) }}" - -- name: Set openshift_cluster_node_labels for the app group - set_fact: - openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'region': 'primary'}}, recursive=True) }}" - -- name: Set openshift_cluster_node_labels for auto-scaling app nodes - set_fact: - openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'autoscaling': 'app'}}, recursive=True) }}" diff --git a/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml b/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml deleted file mode 100644 index 30e094459..000000000 --- a/playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml +++ /dev/null @@ -1,67 +0,0 @@ ---- -- hosts: localhost - gather_facts: False - become: False - tasks: - - set_fact: - cinder_volume: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_volumeID }}" - cinder_fs: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_filesystem }}" - - - name: Attach the volume to the VM - os_server_volume: - state: present - server: "{{ groups['masters'][0] }}" - volume: "{{ cinder_volume }}" - register: volume_attachment - - - set_fact: - attached_device: >- - {{ volume_attachment['attachments']|json_query("[?volume_id=='" + cinder_volume + "'].device | [0]") }} - - - delegate_to: "{{ groups['masters'][0] }}" - block: - - name: Wait for the device to appear - wait_for: path={{ attached_device }} - - - name: Create a temp directory for mounting the volume - tempfile: - prefix: cinder-volume - state: directory - register: cinder_mount_dir - - - name: Format the device - filesystem: - fstype: "{{ cinder_fs }}" - dev: "{{ attached_device }}" - - - name: Mount the device - mount: - name: "{{ cinder_mount_dir.path }}" - src: "{{ attached_device }}" - state: mounted - fstype: "{{ cinder_fs }}" - - - name: Change mode on the filesystem - file: - path: "{{ cinder_mount_dir.path }}" - state: directory - recurse: true - mode: 0777 - - - name: Unmount the device - mount: - name: "{{ cinder_mount_dir.path }}" - src: "{{ attached_device }}" - state: absent - fstype: "{{ cinder_fs }}" - - - name: Delete the temp directory - file: - name: "{{ cinder_mount_dir.path }}" - state: absent - - - name: Detach the volume from the VM - os_server_volume: - state: absent - server: "{{ groups['masters'][0] }}" - volume: "{{ cinder_volume }}" diff --git 
a/playbooks/provisioning/openstack/prerequisites.yml b/playbooks/provisioning/openstack/prerequisites.yml deleted file mode 100644 index 11a31411e..000000000 --- a/playbooks/provisioning/openstack/prerequisites.yml +++ /dev/null @@ -1,123 +0,0 @@ ---- -- hosts: localhost - tasks: - - # Sanity check of inventory variables - - include: net_vars_check.yaml - - # Check ansible - - name: Check Ansible version - assert: - that: > - (ansible_version.major == 2 and ansible_version.minor >= 3) or - (ansible_version.major > 2) - msg: "Ansible version must be at least 2.3" - - # Check shade - - name: Try to import python module shade - command: python -c "import shade" - ignore_errors: yes - register: shade_result - - name: Check if shade is installed - assert: - that: 'shade_result.rc == 0' - msg: "Python module shade is not installed" - - # Check jmespath - - name: Try to import python module shade - command: python -c "import jmespath" - ignore_errors: yes - register: jmespath_result - - name: Check if jmespath is installed - assert: - that: 'jmespath_result.rc == 0' - msg: "Python module jmespath is not installed" - - # Check python-dns - - name: Try to import python DNS module - command: python -c "import dns" - ignore_errors: yes - register: pythondns_result - - name: Check if python-dns is installed - assert: - that: 'pythondns_result.rc == 0' - msg: "Python module python-dns is not installed" - - # Check jinja2 - - name: Try to import jinja2 module - command: python -c "import jinja2" - ignore_errors: yes - register: jinja_result - - name: Check if jinja2 is installed - assert: - that: 'jinja_result.rc == 0' - msg: "Python module jinja2 is not installed" - - # Check Glance image - - name: Try to get image facts - os_image_facts: - image: "{{ openstack_default_image_name }}" - register: image_result - - name: Check that image is available - assert: - that: "image_result.ansible_facts.openstack_image" - msg: "Image {{ openstack_default_image_name }} is not available" - - # Check network name - - name: Try to get network facts - os_networks_facts: - name: "{{ openstack_external_network_name }}" - register: network_result - when: not openstack_provider_network_name|default(None) - - name: Check that network is available - assert: - that: "network_result.ansible_facts.openstack_networks" - msg: "Network {{ openstack_external_network_name }} is not available" - when: not openstack_provider_network_name|default(None) - - # Check keypair - # TODO kpilatov: there is no Ansible module for getting OS keypairs - # (os_keypair is not suitable for this) - # this method does not force python-openstackclient dependency - - name: Try to show keypair - command: > - python -c 'import shade; cloud = shade.openstack_cloud(); - exit(cloud.get_keypair("{{ openstack_ssh_public_key }}") is None)' - ignore_errors: yes - register: key_result - - name: Check that keypair is available - assert: - that: 'key_result.rc == 0' - msg: "Keypair {{ openstack_ssh_public_key }} is not available" - -# Check that custom images and flavors exist -- hosts: localhost - - # Include variables that will be used by heat - vars_files: - - stack_params.yaml - - tasks: - # Check that custom images are available - - include: custom_image_check.yaml - with_items: - - "{{ openstack_master_image }}" - - "{{ openstack_infra_image }}" - - "{{ openstack_node_image }}" - - "{{ openstack_lb_image }}" - - "{{ openstack_etcd_image }}" - - "{{ openstack_dns_image }}" - loop_control: - loop_var: image - - # Check that custom flavors are available - - 
include: custom_flavor_check.yaml - with_items: - - "{{ master_flavor }}" - - "{{ infra_flavor }}" - - "{{ node_flavor }}" - - "{{ lb_flavor }}" - - "{{ etcd_flavor }}" - - "{{ dns_flavor }}" - loop_control: - loop_var: flavor diff --git a/playbooks/provisioning/openstack/provision-openstack.yml b/playbooks/provisioning/openstack/provision-openstack.yml deleted file mode 100644 index bf424676d..000000000 --- a/playbooks/provisioning/openstack/provision-openstack.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- hosts: localhost - gather_facts: True - become: False - vars_files: - - stack_params.yaml - pre_tasks: - - include: pre_tasks.yml - roles: - - role: openstack-stack - - role: openstack-create-cinder-registry - when: - - cinder_hosted_registry_name is defined - - cinder_hosted_registry_size_gb is defined - - role: static_inventory - when: openstack_inventory|default('static') == 'static' - inventory_path: "{{ openstack_inventory_path|default(inventory_dir) }}" - private_ssh_key: "{{ openstack_private_ssh_key|default('') }}" - ssh_config_path: "{{ openstack_ssh_config_path|default('/tmp/ssh.config.openshift.ansible' + '.' + stack_name) }}" - ssh_user: "{{ ansible_user }}" - -- name: Refresh Server inventory or exit to apply SSH config - hosts: localhost - connection: local - become: False - gather_facts: False - tasks: - - name: Exit to apply SSH config for a bastion - meta: end_play - when: openstack_use_bastion|default(False)|bool - - name: Refresh Server inventory - meta: refresh_inventory - -- include: post-provision-openstack.yml - when: not openstack_use_bastion|default(False)|bool diff --git a/playbooks/provisioning/openstack/provision.yaml b/playbooks/provisioning/openstack/provision.yaml deleted file mode 100644 index 474c9c803..000000000 --- a/playbooks/provisioning/openstack/provision.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: "prerequisites.yml" - -- include: "provision-openstack.yml" diff --git a/playbooks/provisioning/openstack/roles b/playbooks/provisioning/openstack/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/provisioning/openstack/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml deleted file mode 100644 index 949a323a7..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml +++ /dev/null @@ -1,59 +0,0 @@ ---- -openshift_deployment_type: origin -#openshift_deployment_type: openshift-enterprise -#openshift_release: v3.5 -openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" - -openshift_master_cluster_method: native -openshift_master_cluster_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" -openshift_master_cluster_public_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" - -osm_default_node_selector: 'region=primary' - -openshift_hosted_router_wait: True -openshift_hosted_registry_wait: True - -## Openstack credentials -#openshift_cloudprovider_kind=openstack -#openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" -#openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}" -#openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" -#openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" -#openshift_cloudprovider_openstack_region="{{ lookup('env', 'OS_REGION_NAME') }}" - - -## Use 
Cinder volume for Openshift registry: -#openshift_hosted_registry_storage_kind: openstack -#openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] -#openshift_hosted_registry_storage_openstack_filesystem: xfs - -## NOTE(shadower): This won't work until the openshift-ansible issue #5657 is fixed: -## https://github.com/openshift/openshift-ansible/issues/5657 -## If you're using the `cinder_hosted_registry_name` option from -## `all.yml`, uncomment these lines: -#openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}" -#openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi" - -## If you're using a Cinder volume you've set up yourself, uncomment these lines: -#openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05 -#openshift_hosted_registry_storage_volume_size: 10Gi - - -# NOTE(shadower): the hostname check seems to always fail because the -# host's floating IP address doesn't match the address received from -# inside the host. -openshift_override_hostname_check: true - -# For POCs or demo environments that are using smaller instances than -# the official recommended values for RAM and DISK, uncomment the line below. -#openshift_disable_check: disk_availability,memory_availability - -# NOTE(shadower): Always switch to root on the OSEv3 nodes. -# openshift-ansible requires an explicit `become`. -ansible_become: true - -# # Flannel networking -#osm_cluster_network_cidr: 10.128.0.0/14 -#openshift_use_openshift_sdn: false -#openshift_use_flannel: true -#flannel_interface: eth1 diff --git a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml b/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml deleted file mode 100644 index 83289307d..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/group_vars/all.yml +++ /dev/null @@ -1,166 +0,0 @@ ---- -env_id: "openshift" -public_dns_domain: "example.com" -public_dns_nameservers: [] - -# # Used Hostnames -# # - set custom hostnames for roles by uncommenting corresponding lines -#openstack_master_hostname: "master" -#openstack_infra_hostname: "infra-node" -#openstack_node_hostname: "app-node" -#openstack_lb_hostname: "lb" -#openstack_etcd_hostname: "etcd" -#openstack_dns_hostname: "dns" - -openstack_ssh_public_key: "openshift" -openstack_external_network_name: "public" -#openstack_private_network_name: "openshift-ansible-{{ stack_name }}-net" -# # A dedicated Neutron network name for containers data network -# # Configures the data network to be separated from openstack_private_network_name -# # NOTE: this is only supported with Flannel SDN yet -#openstack_private_data_network_name: "openshift-ansible-{{ stack_name }}-data-net" - -## If you want to use a provider network, set its name here. -## NOTE: the `openstack_external_network_name` and -## `openstack_private_network_name` options will be ignored when using a -## provider network. 
-#openstack_provider_network_name: "provider" - -# # Used Images -# # - set specific images for roles by uncommenting corresponding lines -# # - note: do not remove openstack_default_image_name definition -#openstack_master_image_name: "centos7" -#openstack_infra_image_name: "centos7" -#openstack_node_image_name: "centos7" -#openstack_lb_image_name: "centos7" -#openstack_etcd_image_name: "centos7" -#openstack_dns_image_name: "centos7" -openstack_default_image_name: "centos7" - -openstack_num_masters: 1 -openstack_num_infra: 1 -openstack_num_nodes: 2 - -# # Used Flavors -# # - set specific flavors for roles by uncommenting corresponding lines -# # - note: do note remove openstack_default_flavor definition -#openstack_master_flavor: "m1.medium" -#openstack_infra_flavor: "m1.medium" -#openstack_node_flavor: "m1.medium" -#openstack_lb_flavor: "m1.medium" -#openstack_etcd_flavor: "m1.medium" -#openstack_dns_flavor: "m1.medium" -openstack_default_flavor: "m1.medium" - -# # Numerical index of nodes to remove -# openstack_nodes_to_remove: [] - -# # Docker volume size -# # - set specific volume size for roles by uncommenting corresponding lines -# # - note: do not remove docker_default_volume_size definition -#docker_master_volume_size: "15" -#docker_infra_volume_size: "15" -#docker_node_volume_size: "15" -#docker_etcd_volume_size: "2" -#docker_dns_volume_size: "1" -#docker_lb_volume_size: "5" -docker_volume_size: "15" - -## Specify server group policies for master and infra nodes. Nova must be configured to -## enable these policies. 'anti-affinity' will ensure that each VM is launched on a -## different physical host. -#openstack_master_server_group_policies: [anti-affinity] -#openstack_infra_server_group_policies: [anti-affinity] - -## Create a Cinder volume and use it for the OpenShift registry. -## NOTE: the openstack credentials and hosted registry options must be set in OSEv3.yml! -#cinder_hosted_registry_name: cinder-registry -#cinder_hosted_registry_size_gb: 10 - -## Set up a filesystem on the cinder volume specified in `OSEv3.yaml`. -## You need to specify the file system and volume ID in OSEv3 via -## `openshift_hosted_registry_storage_openstack_filesystem` and -## `openshift_hosted_registry_storage_openstack_volumeID`. -## WARNING: This will delete any data on the volume! -#prepare_and_format_registry_volume: False - -openstack_subnet_prefix: "192.168.99" - -## Red Hat subscription defaults to false which means we will not attempt to -## subscribe the nodes -#rhsm_register: False - -# # Using Red Hat Satellite: -#rhsm_register: True -#rhsm_satellite: 'sat-6.example.com' -#rhsm_org: 'OPENSHIFT_ORG' -#rhsm_activationkey: '' - -# # Or using RHN username, password and optionally pool: -#rhsm_register: True -#rhsm_username: '' -#rhsm_password: '' -#rhsm_pool: '' - -#rhsm_repos: -# - "rhel-7-server-rpms" -# - "rhel-7-server-ose-3.5-rpms" -# - "rhel-7-server-extras-rpms" -# - "rhel-7-fast-datapath-rpms" - - -# # Roll-your-own DNS -#openstack_num_dns: 0 -#external_nsupdate_keys: -# public: -# key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg==' -# key_algorithm: 'hmac-md5' -# server: '192.168.1.1' -# private: -# key_secret: 'kVE2bVTgZjrdJipxPhID8BEZmbHD8cExlVPR+zbFpW6la8kL5wpXiwOh8q5AAosXQI5t95UXwq3Inx8QT58duw==' -# key_algorithm: 'hmac-md5' -# server: '192.168.1.2' - -# # Customize DNS server security options -#named_public_recursion: 'no' -#named_private_recursion: 'yes' - - -# NOTE(shadower): Do not change this value. 
The Ansible user is currently -# hardcoded to `openshift`. -ansible_user: openshift - -# # Use a single security group for a cluster (default: false) -#openstack_flat_secgrp: false - -# # Openstack inventory type and cluster nodes access pattern -# # Defaults to 'static'. -# # Use 'dynamic' to access cluster nodes directly, via floating IPs -# # and given a dynamic inventory script, like openstack.py -#openstack_inventory: static -# # The path to checkpoint the static inventory from the in-memory one -#openstack_inventory_path: ../../../../inventory - -# # Use bastion node to access cluster nodes (Defaults to False). -# # Requires a static inventory. -#openstack_use_bastion: False -#bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24" -# -# # The Nova key-pair's private SSH key to access inventory nodes -#openstack_private_ssh_key: ~/.ssh/openshift -# # The path for the SSH config to access all nodes -#openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.{{ env_id }}.{{ public_dns_domain }} - - -# If you want to use the VM storage instead of Cinder volumes, set this to `true`. -# NOTE: this is for testing only! Your data will be gone once the VM disappears! -# ephemeral_volumes: false - -# # OpenShift node labels -# # - in order to customise node labels for app and/or infra group, set the -# # openshift_cluster_node_labels variable -#openshift_cluster_node_labels: -# app: -# region: primary -# infra: -# region: infra diff --git a/playbooks/provisioning/openstack/sample-inventory/inventory.py b/playbooks/provisioning/openstack/sample-inventory/inventory.py deleted file mode 100755 index 6a1b74b3d..000000000 --- a/playbooks/provisioning/openstack/sample-inventory/inventory.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python - -from __future__ import print_function - -import json - -import shade - - -if __name__ == '__main__': - cloud = shade.openstack_cloud() - - inventory = {} - - # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER` - # environment variable. 
- cluster_hosts = [ - server for server in cloud.list_servers() - if 'metadata' in server and 'clusterid' in server.metadata] - - masters = [server.name for server in cluster_hosts - if server.metadata['host-type'] == 'master'] - - etcd = [server.name for server in cluster_hosts - if server.metadata['host-type'] == 'etcd'] - if not etcd: - etcd = masters - - infra_hosts = [server.name for server in cluster_hosts - if server.metadata['host-type'] == 'node' and - server.metadata['sub-host-type'] == 'infra'] - - app = [server.name for server in cluster_hosts - if server.metadata['host-type'] == 'node' and - server.metadata['sub-host-type'] == 'app'] - - nodes = list(set(masters + infra_hosts + app)) - - dns = [server.name for server in cluster_hosts - if server.metadata['host-type'] == 'dns'] - - lb = [server.name for server in cluster_hosts - if server.metadata['host-type'] == 'lb'] - - osev3 = list(set(nodes + etcd + lb)) - - groups = [server.metadata.group for server in cluster_hosts - if 'group' in server.metadata] - - inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]} - inventory['OSEv3'] = {'hosts': osev3} - inventory['masters'] = {'hosts': masters} - inventory['etcd'] = {'hosts': etcd} - inventory['nodes'] = {'hosts': nodes} - inventory['infra_hosts'] = {'hosts': infra_hosts} - inventory['app'] = {'hosts': app} - inventory['dns'] = {'hosts': dns} - inventory['lb'] = {'hosts': lb} - - for server in cluster_hosts: - if 'group' in server.metadata: - group = server.metadata.group - if group not in inventory: - inventory[group] = {'hosts': []} - inventory[group]['hosts'].append(server.name) - - inventory['_meta'] = {'hostvars': {}} - - for server in cluster_hosts: - ssh_ip_address = server.public_v4 or server.private_v4 - vars = { - 'ansible_host': ssh_ip_address - } - - public_v4 = server.public_v4 or server.private_v4 - if public_v4: - vars['public_v4'] = public_v4 - # TODO(shadower): what about multiple networks? 
- if server.private_v4: - vars['private_v4'] = server.private_v4 - - node_labels = server.metadata.get('node_labels') - if node_labels: - vars['openshift_node_labels'] = node_labels - - inventory['_meta']['hostvars'][server.name] = vars - - print(json.dumps(inventory, indent=4, sort_keys=True)) diff --git a/playbooks/provisioning/openstack/scale-up.yaml b/playbooks/provisioning/openstack/scale-up.yaml deleted file mode 100644 index 79fc09050..000000000 --- a/playbooks/provisioning/openstack/scale-up.yaml +++ /dev/null @@ -1,75 +0,0 @@ ---- -# Get the needed information about the current deployment -- hosts: masters[0] - tasks: - - name: Get number of app nodes - shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l - register: oc_old_num_nodes - - name: Get names of app nodes - shell: oc get nodes -l autoscaling=app --no-headers=true | cut -f1 -d " " - register: oc_old_app_nodes - -- hosts: localhost - tasks: - # Since both number and names of app nodes are to be removed - # localhost variables for these values need to be set - - name: Store old number and names of app nodes locally (if there is an existing deployment) - when: '"masters" in groups' - register: set_fact_result - set_fact: - oc_old_num_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_num_nodes'].stdout }}" - oc_old_app_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_app_nodes'].stdout_lines }}" - - - name: Set default values for old app nodes (if there is no existing deployment) - when: 'set_fact_result | skipped' - set_fact: - oc_old_num_nodes: 0 - oc_old_app_nodes: [] - - # Set how many nodes are to be added (1 by default) - - name: Set how many nodes are to be added - set_fact: - increment_by: 1 - - name: Check that the number corresponds to scaling up (not down) - assert: - that: 'increment_by | int >= 1' - msg: > - FAIL: The value of increment_by must be at least 1 - (but it is {{ increment_by | int }}). - - name: Update openstack_num_nodes variable - set_fact: - openstack_num_nodes: "{{ oc_old_num_nodes | int + increment_by | int }}" - -# Run provision.yaml with higher number of nodes to create a new app-node VM -- include: provision.yaml - -# Run config.yml to perform openshift installation -# Path to openshift-ansible can be customised: -# - the value of openshift_ansible_dir has to be an absolute path -# - the path cannot contain the '/' symbol at the end - -# Creating a new deployment by the full installation -- include: "{{ openshift_ansible_dir }}/playbooks/byo/config.yml" - vars: - openshift_ansible_dir: ../../../../openshift-ansible - when: 'not groups["new_nodes"] | list' - -# Scaling up existing deployment -- include: "{{ openshift_ansible_dir }}/playbooks/byo/openshift-node/scaleup.yml" - vars: - openshift_ansible_dir: ../../../../openshift-ansible - when: 'groups["new_nodes"] | list' - -# Post-verification: Verify new number of nodes -- hosts: masters[0] - tasks: - - name: Get number of nodes - shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l - register: oc_new_num_nodes - - name: Check that the actual result matches the defined value - assert: - that: 'oc_new_num_nodes.stdout | int == (hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int)' - msg: > - FAIL: Number of application nodes has not been increased accordingly - (it should be {{ hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int }} - but it is {{ oc_new_num_nodes.stdout | int }}). 
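The scale-up playbook above follows a count, bump, re-provision, verify pattern: it records the current number of app nodes, increases `openstack_num_nodes`, re-runs provisioning, and finally asserts the new node count. A minimal standalone sketch of that verification step is shown below; it assumes `oc` is available on the first master and that app nodes carry the `autoscaling=app` label used by the playbook, and `expected_nodes` is an illustrative variable that would normally be computed from the old count plus `increment_by`.

```yaml
# Hedged sketch of the post-scale-up verification step; not part of the patch above.
- hosts: masters[0]
  gather_facts: false
  vars:
    expected_nodes: 3   # illustrative; normally old node count + increment_by
  tasks:
    - name: Count app nodes by label
      shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l
      register: app_node_count

    - name: Verify the cluster scaled to the expected size
      assert:
        that: app_node_count.stdout | int == expected_nodes | int
        msg: >-
          Expected {{ expected_nodes }} app nodes,
          found {{ app_node_count.stdout | int }}.
```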
diff --git a/playbooks/provisioning/openstack/stack_params.yaml b/playbooks/provisioning/openstack/stack_params.yaml deleted file mode 100644 index a4da31bfe..000000000 --- a/playbooks/provisioning/openstack/stack_params.yaml +++ /dev/null @@ -1,49 +0,0 @@ ---- -stack_name: "{{ env_id }}.{{ public_dns_domain }}" -dns_domain: "{{ public_dns_domain }}" -dns_nameservers: "{{ public_dns_nameservers }}" -subnet_prefix: "{{ openstack_subnet_prefix }}" -master_hostname: "{{ openstack_master_hostname | default('master') }}" -infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" -node_hostname: "{{ openstack_node_hostname | default('app-node') }}" -lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" -etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" -dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" -ssh_public_key: "{{ openstack_ssh_public_key }}" -openstack_image: "{{ openstack_default_image_name }}" -lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" -etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" -master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" -node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" -infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" -dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" -openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" -openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" -openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" -openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" -openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" -openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" -openstack_private_network: >- - {% if openstack_provider_network_name | default(None) -%} - {{ openstack_provider_network_name }} - {%- else -%} - {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} - {%- endif -%} -provider_network: "{{ openstack_provider_network_name | default(None) }}" -external_network: "{{ openstack_external_network_name | default(None) }}" -num_etcd: "{{ openstack_num_etcd | default(0) }}" -num_masters: "{{ openstack_num_masters }}" -num_nodes: "{{ openstack_num_nodes }}" -num_infra: "{{ openstack_num_infra }}" -num_dns: "{{ openstack_num_dns | default(1) }}" -master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}" -infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" -master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" -infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" -node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" -etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}" -dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" -lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" -nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" -use_bastion: "{{ openstack_use_bastion|default(False) }}" -ui_ssh_tunnel: "{{ openshift_ui_ssh_tunnel|default(False) }}" -- cgit v1.2.3 From d148b6df6b8aeb925e752ac2e3dff5f785b12943 
Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 16 Oct 2017 17:01:32 +0200 Subject: .gitignore casl-infra --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 1e187db16..e8be4ea5b 100644 --- a/.gitignore +++ b/.gitignore @@ -24,3 +24,4 @@ multi_ec2.yaml *.egg-info .eggs cover/ +roles/infra-ansible/ -- cgit v1.2.3 From baf7066d49a80d7faa554914efb7858c80c299ee Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 16 Oct 2017 17:34:54 +0200 Subject: Update lookup plugins path --- playbooks/openstack/ansible.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/openstack/ansible.cfg b/playbooks/openstack/ansible.cfg index a21f023ea..ae5669c35 100644 --- a/playbooks/openstack/ansible.cfg +++ b/playbooks/openstack/ansible.cfg @@ -15,7 +15,7 @@ fact_caching_connection = .ansible/cached_facts fact_caching_timeout = 900 stdout_callback = skippy callback_whitelist = profile_tasks -lookup_plugins = openshift-ansible-contrib/lookup_plugins +lookup_plugins = openshift-ansible/lookup_plugins [ssh_connection] -- cgit v1.2.3 From 479ba2dec446016cb0cea38e4c679f54dea24193 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 16 Oct 2017 17:35:22 +0200 Subject: Update readme --- playbooks/openstack/README.md | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index a2f553f4c..f3d5b5aa8 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -106,13 +106,13 @@ $ openstack image list ### 2. Configuring the OpenStack Environment and OpenShift Cluster The configuration is all done in an Ansible inventory directory. We -will clone the [openshift-ansible-contrib][contrib] repository and set +will clone the [openshift-ansible][openshift-ansible] repository and set things up for a minimal installation. ``` -$ git clone https://github.com/openshift/openshift-ansible-contrib -$ cp -r openshift-ansible-contrib/playbooks/provisioning/openstack/sample-inventory/ inventory +$ git clone https://github.com/openshift/openshift-ansible +$ cp -r openshift-ansible/playbooks/openstack/sample-inventory/ inventory ``` If you're testing multiple configurations, you can have multiple @@ -185,8 +185,8 @@ has useful defaults -- copy it to the directory you're going to run Ansible from. ```bash -$ ansible-galaxy install -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml -p openshift-ansible-contrib/roles -$ cp openshift-ansible-contrib/playbooks/provisioning/openstack/ansible.cfg ansible.cfg +$ ansible-galaxy install -r openshift-ansible/playbooks/openstack/galaxy-requirements.yaml -p openshift-ansible/roles +$ cp openshift-ansible/playbooks/openstack/ansible.cfg ansible.cfg ``` (you will only need to do this once) @@ -194,7 +194,7 @@ Then run the provisioning playbook -- this will create the OpenStack resources: ```bash -$ ansible-playbook -i inventory openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml +$ ansible-playbook -i inventory openshift-ansible/playbooks/openstack/openshift-cluster/provision.yaml ``` If you're using multiple inventories, make sure you pass the path to @@ -203,11 +203,10 @@ the right one to `-i`. ### 4. Installing OpenShift -We will use the `openshift-ansible` project to install openshift on -top of the OpenStack nodes we have prepared: +Run the `byo/config.yml` playbook on top of the OpenStack nodes we have +prepared. 
```bash -$ git clone https://github.com/openshift/openshift-ansible $ ansible-playbook -i inventory openshift-ansible/playbooks/byo/config.yml ``` @@ -236,7 +235,6 @@ advanced configuration: [devstack]: https://docs.openstack.org/devstack/ [tripleo]: http://tripleo.org/ [ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node -[contrib]: https://github.com/openshift/openshift-ansible-contrib [control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/ [hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware [origin]: https://www.openshift.org/ @@ -249,10 +247,3 @@ advanced configuration: [external-dns]: ./advanced-configuration.md#dns-configuration-variables [cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry [bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node - - - -## License - -Like the rest of the openshift-ansible-contrib repository, the code -here is licensed under Apache 2. -- cgit v1.2.3 From 8b8eeab919b76bee6a2e0ad1336bd4dbb1db1e95 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 16 Oct 2017 17:35:54 +0200 Subject: Use the docker-storage-setup role --- playbooks/openstack/openshift-cluster/pre-install.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/playbooks/openstack/openshift-cluster/pre-install.yml b/playbooks/openstack/openshift-cluster/pre-install.yml index 45e9005cc..c9f333b92 100644 --- a/playbooks/openstack/openshift-cluster/pre-install.yml +++ b/playbooks/openstack/openshift-cluster/pre-install.yml @@ -10,7 +10,9 @@ become: true roles: - { role: subscription-manager, when: hostvars.localhost.rhsm_register|default(False), tags: 'subscription-manager', ansible_sudo: true } - - { role: docker, tags: 'docker' } + - role: docker-storage-setup + docker_dev: /dev/vdb + tags: 'docker' - { role: openshift-prep, tags: 'openshift-prep' } - hosts: localhost:cluster_hosts -- cgit v1.2.3 From 4ed9aef6f8ed0850e70b498e780d0d8e22bc277f Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 23 Oct 2017 12:57:29 +0200 Subject: Add openshift_openstack role and move tasks there All the tasks that were previously in playbooks are now under `roles/openshift_openstack`. The `openshift-cluster` directory now only contains playbooks that include tasks from that role. This makes the structure much closer to that of the AWS provider. 
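To illustrate the structure this commit describes (thin playbooks that delegate to task files in the new `openshift_openstack` role), here is a minimal sketch of the consumption pattern. It assumes the role is on the Ansible role path; the `provision.yml` task-file name matches one of the files added by the commit.

```yaml
# Minimal sketch of a thin playbook pulling a task file from the role.
- name: Create the OpenStack resources for the cluster
  hosts: localhost
  gather_facts: true
  become: false
  tasks:
    - name: Provision the Heat stack and cluster nodes
      include_role:
        name: openshift_openstack
        tasks_from: provision.yml
```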
--- playbooks/openstack/README.md | 18 + playbooks/openstack/galaxy-requirements.yaml | 10 - .../openshift-cluster/custom_flavor_check.yaml | 9 - .../openshift-cluster/custom_image_check.yaml | 9 - playbooks/openstack/openshift-cluster/install.yml | 18 + .../openshift-cluster/net_vars_check.yaml | 14 - .../openstack/openshift-cluster/post-install.yml | 4 +- .../openshift-cluster/post-provision-openstack.yml | 118 --- .../openstack/openshift-cluster/pre-install.yml | 21 - .../openstack/openshift-cluster/pre_tasks.yml | 53 -- .../prepare-and-format-cinder-volume.yaml | 67 -- .../openstack/openshift-cluster/prerequisites.yml | 129 +-- .../openshift-cluster/provision-openstack.yml | 35 - .../openstack/openshift-cluster/provision.yaml | 4 - .../openstack/openshift-cluster/provision.yml | 37 + .../openshift-cluster/provision_install.yml | 9 + .../openstack/openshift-cluster/scale-up.yaml | 11 +- .../openstack/openshift-cluster/stack_params.yaml | 49 -- playbooks/openstack/sample-inventory/inventory.py | 36 +- requirements.txt | 1 + roles/common/defaults/main.yml | 6 - roles/dns-records/defaults/main.yml | 2 - roles/dns-records/tasks/main.yml | 121 --- roles/dns-server-detect/defaults/main.yml | 3 - roles/dns-server-detect/tasks/main.yml | 36 - roles/dns-views/defaults/main.yml | 4 - roles/dns-views/tasks/main.yml | 30 - roles/docker-storage-setup/defaults/main.yaml | 7 - roles/docker-storage-setup/tasks/main.yaml | 46 -- .../templates/docker-storage-setup-dm.j2 | 4 - .../templates/docker-storage-setup-overlayfs.j2 | 7 - roles/hostnames/tasks/main.yaml | 26 - roles/hostnames/test/inv | 12 - roles/hostnames/test/roles | 1 - roles/hostnames/test/test.yaml | 4 - roles/hostnames/vars/main.yaml | 2 - roles/hostnames/vars/records.yaml | 28 - roles/node-network-manager/tasks/main.yml | 22 - roles/openshift-prep/defaults/main.yml | 13 - roles/openshift-prep/tasks/main.yml | 4 - roles/openshift-prep/tasks/prerequisites.yml | 37 - roles/openshift_openstack/defaults/main.yml | 49 ++ .../tasks/check-prerequisites.yml | 109 +++ roles/openshift_openstack/tasks/cleanup.yml | 6 + .../tasks/container-storage-setup.yml | 37 + .../tasks/custom_flavor_check.yaml | 9 + .../tasks/custom_image_check.yaml | 10 + .../tasks/generate-templates.yml | 26 + roles/openshift_openstack/tasks/hostname.yml | 33 + .../openshift_openstack/tasks/net_vars_check.yaml | 14 + .../tasks/node-configuration.yml | 11 + roles/openshift_openstack/tasks/node-network.yml | 19 + roles/openshift_openstack/tasks/node-packages.yml | 15 + roles/openshift_openstack/tasks/populate-dns.yml | 5 + .../tasks/prepare-and-format-cinder-volume.yaml | 59 ++ roles/openshift_openstack/tasks/provision.yml | 30 + .../tasks/subnet_update_dns_servers.yaml | 9 + .../templates/docker-storage-setup-dm.j2 | 4 + .../templates/docker-storage-setup-overlayfs.j2 | 7 + .../templates/heat_stack.yaml.j2 | 888 +++++++++++++++++++++ .../templates/heat_stack_server.yaml.j2 | 270 +++++++ roles/openshift_openstack/templates/user_data.j2 | 13 + roles/openshift_openstack/vars/main.yml | 49 ++ roles/openstack-stack/tasks/main.yml | 1 - .../tasks/subnet_update_dns_servers.yaml | 9 - 65 files changed, 1791 insertions(+), 958 deletions(-) delete mode 100644 playbooks/openstack/galaxy-requirements.yaml delete mode 100644 playbooks/openstack/openshift-cluster/custom_flavor_check.yaml delete mode 100644 playbooks/openstack/openshift-cluster/custom_image_check.yaml create mode 100644 playbooks/openstack/openshift-cluster/install.yml delete mode 100644 
playbooks/openstack/openshift-cluster/net_vars_check.yaml delete mode 100644 playbooks/openstack/openshift-cluster/post-provision-openstack.yml delete mode 100644 playbooks/openstack/openshift-cluster/pre-install.yml delete mode 100644 playbooks/openstack/openshift-cluster/pre_tasks.yml delete mode 100644 playbooks/openstack/openshift-cluster/prepare-and-format-cinder-volume.yaml delete mode 100644 playbooks/openstack/openshift-cluster/provision-openstack.yml delete mode 100644 playbooks/openstack/openshift-cluster/provision.yaml create mode 100644 playbooks/openstack/openshift-cluster/provision.yml create mode 100644 playbooks/openstack/openshift-cluster/provision_install.yml delete mode 100644 playbooks/openstack/openshift-cluster/stack_params.yaml delete mode 100644 roles/common/defaults/main.yml delete mode 100644 roles/dns-records/defaults/main.yml delete mode 100644 roles/dns-records/tasks/main.yml delete mode 100644 roles/dns-server-detect/defaults/main.yml delete mode 100644 roles/dns-server-detect/tasks/main.yml delete mode 100644 roles/dns-views/defaults/main.yml delete mode 100644 roles/dns-views/tasks/main.yml delete mode 100644 roles/docker-storage-setup/defaults/main.yaml delete mode 100644 roles/docker-storage-setup/tasks/main.yaml delete mode 100644 roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 delete mode 100644 roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 delete mode 100644 roles/hostnames/tasks/main.yaml delete mode 100644 roles/hostnames/test/inv delete mode 120000 roles/hostnames/test/roles delete mode 100644 roles/hostnames/test/test.yaml delete mode 100644 roles/hostnames/vars/main.yaml delete mode 100644 roles/hostnames/vars/records.yaml delete mode 100644 roles/node-network-manager/tasks/main.yml delete mode 100644 roles/openshift-prep/defaults/main.yml delete mode 100644 roles/openshift-prep/tasks/main.yml delete mode 100644 roles/openshift-prep/tasks/prerequisites.yml create mode 100644 roles/openshift_openstack/defaults/main.yml create mode 100644 roles/openshift_openstack/tasks/check-prerequisites.yml create mode 100644 roles/openshift_openstack/tasks/cleanup.yml create mode 100644 roles/openshift_openstack/tasks/container-storage-setup.yml create mode 100644 roles/openshift_openstack/tasks/custom_flavor_check.yaml create mode 100644 roles/openshift_openstack/tasks/custom_image_check.yaml create mode 100644 roles/openshift_openstack/tasks/generate-templates.yml create mode 100644 roles/openshift_openstack/tasks/hostname.yml create mode 100644 roles/openshift_openstack/tasks/net_vars_check.yaml create mode 100644 roles/openshift_openstack/tasks/node-configuration.yml create mode 100644 roles/openshift_openstack/tasks/node-network.yml create mode 100644 roles/openshift_openstack/tasks/node-packages.yml create mode 100644 roles/openshift_openstack/tasks/populate-dns.yml create mode 100644 roles/openshift_openstack/tasks/prepare-and-format-cinder-volume.yaml create mode 100644 roles/openshift_openstack/tasks/provision.yml create mode 100644 roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml create mode 100644 roles/openshift_openstack/templates/docker-storage-setup-dm.j2 create mode 100644 roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 create mode 100644 roles/openshift_openstack/templates/heat_stack.yaml.j2 create mode 100644 roles/openshift_openstack/templates/heat_stack_server.yaml.j2 create mode 100644 roles/openshift_openstack/templates/user_data.j2 create mode 100644 
roles/openshift_openstack/vars/main.yml delete mode 100644 roles/openstack-stack/tasks/subnet_update_dns_servers.yaml diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index f3d5b5aa8..875004cc9 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -38,6 +38,19 @@ Optional: * External Neutron network with a floating IP address pool +## DNS Requirements + +OpenShift requires DNS to operate properly. OpenStack supports DNS-as-a-service +in the form of the Designate project, but the playbooks here don't support it +yet. Until we do, you will need to provide a DNS solution yourself (or in case +you are not running Designate when we do). + +If your server supports nsupdate, we will use it to add the necessary records. + +TODO(shadower): describe how to build a sample DNS server and how to configure +our playbooks for nsupdate. + + ## Installation There are four main parts to the installation: @@ -143,6 +156,8 @@ $ vi inventory/group_vars/all.yml 4. Set the `openstack_default_flavor` to the flavor you want your OpenShift VMs to use. - See `openstack flavor list` for the list of available flavors. +5. Set the `public_dns_nameservers` to the list of the IP addresses + of the DNS servers used for the **private** address resolution[1]. **NOTE**: In most OpenStack environments, you will also need to configure the forwarders for the DNS server we create. This depends on @@ -153,6 +168,9 @@ put the IP addresses into `public_dns_nameservers` in `inventory/group_vars/all.yml`. +[1]: Yes, the name is bad. We will fix it. + + #### OpenShift configuration The OpenShift configuration is in `inventory/group_vars/OSEv3.yml`. diff --git a/playbooks/openstack/galaxy-requirements.yaml b/playbooks/openstack/galaxy-requirements.yaml deleted file mode 100644 index 1d745dcc3..000000000 --- a/playbooks/openstack/galaxy-requirements.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# This is the Ansible Galaxy requirements file to pull in the correct roles - -# From 'infra-ansible' -- src: https://github.com/redhat-cop/infra-ansible - version: master - -# From 'openshift-ansible' -- src: https://github.com/openshift/openshift-ansible - version: master diff --git a/playbooks/openstack/openshift-cluster/custom_flavor_check.yaml b/playbooks/openstack/openshift-cluster/custom_flavor_check.yaml deleted file mode 100644 index e11874c28..000000000 --- a/playbooks/openstack/openshift-cluster/custom_flavor_check.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Try to get flavor facts - os_flavor_facts: - name: "{{ flavor }}" - register: flavor_result -- name: Check that custom flavor is available - assert: - that: "flavor_result.ansible_facts.openstack_flavors" - msg: "Flavor {{ flavor }} is not available." diff --git a/playbooks/openstack/openshift-cluster/custom_image_check.yaml b/playbooks/openstack/openshift-cluster/custom_image_check.yaml deleted file mode 100644 index 452e1e4d8..000000000 --- a/playbooks/openstack/openshift-cluster/custom_image_check.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Try to get image facts - os_image_facts: - image: "{{ image }}" - register: image_result -- name: Check that custom image is available - assert: - that: "image_result.ansible_facts.openstack_image" - msg: "Image {{ image }} is not available." 
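The standalone flavor and image checks deleted above move under `roles/openshift_openstack/tasks/`. The pattern itself — look the resource up, then assert it exists before provisioning — is sketched below using the same `os_image_facts` and `os_flavor_facts` modules; the image and flavor names are placeholders.

```yaml
# Sketch of the pre-provisioning validation pattern; names are placeholders.
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Try to get image facts
      os_image_facts:
        image: centos7
      register: image_result

    - name: Try to get flavor facts
      os_flavor_facts:
        name: m1.medium
      register: flavor_result

    - name: Check that the image and flavor are available
      assert:
        that:
          - image_result.ansible_facts.openstack_image
          - flavor_result.ansible_facts.openstack_flavors
        msg: "The requested image or flavor is not available in this cloud."
```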
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml new file mode 100644 index 000000000..40d4767ba --- /dev/null +++ b/playbooks/openstack/openshift-cluster/install.yml @@ -0,0 +1,18 @@ +--- +# NOTE(shadower): the AWS playbook builds an in-memory inventory of +# all the EC2 instances here. We don't need to as that's done by the +# dynamic inventory. + +# TODO(shadower): the AWS playbook sets the +# `openshift_master_cluster_hostname` and `osm_custom_cors_origins` +# values here. We do it in the OSEv3 group vars. Do we need to add +# some logic here? + +- name: normalize groups + include: ../../byo/openshift-cluster/initialize_groups.yml + +- name: run the std_include + include: ../../common/openshift-cluster/std_include.yml + +- name: run the config + include: ../../common/openshift-cluster/config.yml diff --git a/playbooks/openstack/openshift-cluster/net_vars_check.yaml b/playbooks/openstack/openshift-cluster/net_vars_check.yaml deleted file mode 100644 index 68afde415..000000000 --- a/playbooks/openstack/openshift-cluster/net_vars_check.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Check the provider network configuration - fail: - msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network" - when: - - openstack_provider_network_name is defined - - openstack_private_data_network_name is defined - -- name: Check the flannel network configuration - fail: - msg: "A dedicated containers data network is only supported with Flannel SDN" - when: - - openstack_private_data_network_name is defined - - not openshift_use_flannel|default(False)|bool diff --git a/playbooks/openstack/openshift-cluster/post-install.yml b/playbooks/openstack/openshift-cluster/post-install.yml index 417813e2a..7b1744a18 100644 --- a/playbooks/openstack/openshift-cluster/post-install.yml +++ b/playbooks/openstack/openshift-cluster/post-install.yml @@ -22,9 +22,9 @@ - when: openshift_use_flannel|default(False)|bool block: - include_role: - name: openshift-ansible/roles/os_firewall + name: os_firewall - include_role: - name: openshift-ansible/roles/lib_os_firewall + name: lib_os_firewall - name: set allow rules for dnsmasq os_firewall_manage_iptables: name: "{{ item.service }}" diff --git a/playbooks/openstack/openshift-cluster/post-provision-openstack.yml b/playbooks/openstack/openshift-cluster/post-provision-openstack.yml deleted file mode 100644 index e460fbf12..000000000 --- a/playbooks/openstack/openshift-cluster/post-provision-openstack.yml +++ /dev/null @@ -1,118 +0,0 @@ ---- -- hosts: cluster_hosts - name: Wait for the the nodes to come up - become: False - gather_facts: False - tasks: - - when: not openstack_use_bastion|default(False)|bool - wait_for_connection: - - when: openstack_use_bastion|default(False)|bool - delegate_to: bastion - wait_for_connection: - -- hosts: cluster_hosts - gather_facts: True - tasks: - - name: Debug hostvar - debug: - msg: "{{ hostvars[inventory_hostname] }}" - verbosity: 2 - -- name: OpenShift Pre-Requisites (part 1) - include: pre-install.yml - -- name: Assign hostnames - hosts: cluster_hosts - gather_facts: False - become: true - roles: - - role: hostnames - -- name: Subscribe DNS Host to allow for configuration below - hosts: dns - gather_facts: False - become: true - roles: - - role: subscription-manager - when: hostvars.localhost.rhsm_register|default(False) - tags: 'subscription-manager' - -- name: Determine which DNS server(s) to use for our generated records - 
hosts: localhost - gather_facts: False - become: False - roles: - - dns-server-detect - -- name: Build the DNS Server Views and Configure DNS Server(s) - hosts: dns - gather_facts: False - become: true - roles: - - role: dns-views - - role: infra-ansible/roles/dns-server - -- name: Build and process DNS Records - hosts: localhost - gather_facts: True - become: False - roles: - - role: dns-records - use_bastion: "{{ openstack_use_bastion|default(False)|bool }}" - - role: infra-ansible/roles/dns - -- name: Switch the stack subnet to the configured private DNS server - hosts: localhost - gather_facts: False - become: False - vars_files: - - stack_params.yaml - tasks: - - include_role: - name: openstack-stack - tasks_from: subnet_update_dns_servers - -- name: OpenShift Pre-Requisites (part 2) - hosts: OSEv3 - gather_facts: true - become: true - vars: - interface: "{{ flannel_interface|default('eth1') }}" - interface_file: /etc/sysconfig/network-scripts/ifcfg-{{ interface }} - interface_config: - DEVICE: "{{ interface }}" - TYPE: Ethernet - BOOTPROTO: dhcp - ONBOOT: 'yes' - DEFTROUTE: 'no' - PEERDNS: 'no' - pre_tasks: - - name: "Include DNS configuration to ensure proper name resolution" - lineinfile: - state: present - dest: /etc/sysconfig/network - regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" - line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}" - - name: "Configure the flannel interface options" - when: openshift_use_flannel|default(False)|bool - block: - - file: - dest: "{{ interface_file }}" - state: touch - mode: 0644 - owner: root - group: root - - lineinfile: - state: present - dest: "{{ interface_file }}" - regexp: "{{ item.key }}=" - line: "{{ item.key }}={{ item.value }}" - with_dict: "{{ interface_config }}" - roles: - - node-network-manager - -- include: prepare-and-format-cinder-volume.yaml - when: > - prepare_and_format_registry_volume|default(False) or - (cinder_registry_volume is defined and - cinder_registry_volume.changed|default(False)) diff --git a/playbooks/openstack/openshift-cluster/pre-install.yml b/playbooks/openstack/openshift-cluster/pre-install.yml deleted file mode 100644 index c9f333b92..000000000 --- a/playbooks/openstack/openshift-cluster/pre-install.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -############################### -# OpenShift Pre-Requisites - -# - subscribe hosts -# - prepare docker -# - other prep (install additional packages, etc.) 
-# -- hosts: OSEv3 - become: true - roles: - - { role: subscription-manager, when: hostvars.localhost.rhsm_register|default(False), tags: 'subscription-manager', ansible_sudo: true } - - role: docker-storage-setup - docker_dev: /dev/vdb - tags: 'docker' - - { role: openshift-prep, tags: 'openshift-prep' } - -- hosts: localhost:cluster_hosts - become: False - tasks: - - include: pre_tasks.yml diff --git a/playbooks/openstack/openshift-cluster/pre_tasks.yml b/playbooks/openstack/openshift-cluster/pre_tasks.yml deleted file mode 100644 index 11fe2dd84..000000000 --- a/playbooks/openstack/openshift-cluster/pre_tasks.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- name: Generate Environment ID - set_fact: - env_random_id: "{{ ansible_date_time.epoch }}" - run_once: true - delegate_to: localhost - -- name: Set default Environment ID - set_fact: - default_env_id: "openshift-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}" - delegate_to: localhost - -- name: Setting Common Facts - set_fact: - env_id: "{{ env_id | default(default_env_id) }}" - delegate_to: localhost - -- name: Updating DNS domain to include env_id (if not empty) - set_fact: - full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' + public_dns_domain) }}" - delegate_to: localhost - -- name: Set the APP domain for OpenShift use - set_fact: - openshift_app_domain: "{{ openshift_app_domain | default('apps') }}" - delegate_to: localhost - -- name: Set the default app domain for routing purposes - set_fact: - openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}" - delegate_to: localhost - when: - - openshift_master_default_subdomain is undefined - -# Check that openshift_cluster_node_labels has regions defined for all groups -# NOTE(kpilatov): if node labels are to be enabled for more groups, -# this check needs to be modified as well -- name: Set openshift_cluster_node_labels if undefined (should not happen) - set_fact: - openshift_cluster_node_labels: {'app': {'region': 'primary'}, 'infra': {'region': 'infra'}} - when: openshift_cluster_node_labels is not defined - -- name: Set openshift_cluster_node_labels for the infra group - set_fact: - openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'infra': {'region': 'infra'}}, recursive=True) }}" - -- name: Set openshift_cluster_node_labels for the app group - set_fact: - openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'region': 'primary'}}, recursive=True) }}" - -- name: Set openshift_cluster_node_labels for auto-scaling app nodes - set_fact: - openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'autoscaling': 'app'}}, recursive=True) }}" diff --git a/playbooks/openstack/openshift-cluster/prepare-and-format-cinder-volume.yaml b/playbooks/openstack/openshift-cluster/prepare-and-format-cinder-volume.yaml deleted file mode 100644 index 30e094459..000000000 --- a/playbooks/openstack/openshift-cluster/prepare-and-format-cinder-volume.yaml +++ /dev/null @@ -1,67 +0,0 @@ ---- -- hosts: localhost - gather_facts: False - become: False - tasks: - - set_fact: - cinder_volume: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_volumeID }}" - cinder_fs: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_filesystem }}" - - - name: Attach the volume to the VM - os_server_volume: - state: present - server: "{{ groups['masters'][0] }}" - volume: "{{ cinder_volume }}" - register: volume_attachment - - - 
set_fact: - attached_device: >- - {{ volume_attachment['attachments']|json_query("[?volume_id=='" + cinder_volume + "'].device | [0]") }} - - - delegate_to: "{{ groups['masters'][0] }}" - block: - - name: Wait for the device to appear - wait_for: path={{ attached_device }} - - - name: Create a temp directory for mounting the volume - tempfile: - prefix: cinder-volume - state: directory - register: cinder_mount_dir - - - name: Format the device - filesystem: - fstype: "{{ cinder_fs }}" - dev: "{{ attached_device }}" - - - name: Mount the device - mount: - name: "{{ cinder_mount_dir.path }}" - src: "{{ attached_device }}" - state: mounted - fstype: "{{ cinder_fs }}" - - - name: Change mode on the filesystem - file: - path: "{{ cinder_mount_dir.path }}" - state: directory - recurse: true - mode: 0777 - - - name: Unmount the device - mount: - name: "{{ cinder_mount_dir.path }}" - src: "{{ attached_device }}" - state: absent - fstype: "{{ cinder_fs }}" - - - name: Delete the temp directory - file: - name: "{{ cinder_mount_dir.path }}" - state: absent - - - name: Detach the volume from the VM - os_server_volume: - state: absent - server: "{{ groups['masters'][0] }}" - volume: "{{ cinder_volume }}" diff --git a/playbooks/openstack/openshift-cluster/prerequisites.yml b/playbooks/openstack/openshift-cluster/prerequisites.yml index 11a31411e..0356b37dd 100644 --- a/playbooks/openstack/openshift-cluster/prerequisites.yml +++ b/playbooks/openstack/openshift-cluster/prerequisites.yml @@ -1,123 +1,12 @@ --- - hosts: localhost tasks: - - # Sanity check of inventory variables - - include: net_vars_check.yaml - - # Check ansible - - name: Check Ansible version - assert: - that: > - (ansible_version.major == 2 and ansible_version.minor >= 3) or - (ansible_version.major > 2) - msg: "Ansible version must be at least 2.3" - - # Check shade - - name: Try to import python module shade - command: python -c "import shade" - ignore_errors: yes - register: shade_result - - name: Check if shade is installed - assert: - that: 'shade_result.rc == 0' - msg: "Python module shade is not installed" - - # Check jmespath - - name: Try to import python module shade - command: python -c "import jmespath" - ignore_errors: yes - register: jmespath_result - - name: Check if jmespath is installed - assert: - that: 'jmespath_result.rc == 0' - msg: "Python module jmespath is not installed" - - # Check python-dns - - name: Try to import python DNS module - command: python -c "import dns" - ignore_errors: yes - register: pythondns_result - - name: Check if python-dns is installed - assert: - that: 'pythondns_result.rc == 0' - msg: "Python module python-dns is not installed" - - # Check jinja2 - - name: Try to import jinja2 module - command: python -c "import jinja2" - ignore_errors: yes - register: jinja_result - - name: Check if jinja2 is installed - assert: - that: 'jinja_result.rc == 0' - msg: "Python module jinja2 is not installed" - - # Check Glance image - - name: Try to get image facts - os_image_facts: - image: "{{ openstack_default_image_name }}" - register: image_result - - name: Check that image is available - assert: - that: "image_result.ansible_facts.openstack_image" - msg: "Image {{ openstack_default_image_name }} is not available" - - # Check network name - - name: Try to get network facts - os_networks_facts: - name: "{{ openstack_external_network_name }}" - register: network_result - when: not openstack_provider_network_name|default(None) - - name: Check that network is available - assert: - that: 
"network_result.ansible_facts.openstack_networks" - msg: "Network {{ openstack_external_network_name }} is not available" - when: not openstack_provider_network_name|default(None) - - # Check keypair - # TODO kpilatov: there is no Ansible module for getting OS keypairs - # (os_keypair is not suitable for this) - # this method does not force python-openstackclient dependency - - name: Try to show keypair - command: > - python -c 'import shade; cloud = shade.openstack_cloud(); - exit(cloud.get_keypair("{{ openstack_ssh_public_key }}") is None)' - ignore_errors: yes - register: key_result - - name: Check that keypair is available - assert: - that: 'key_result.rc == 0' - msg: "Keypair {{ openstack_ssh_public_key }} is not available" - -# Check that custom images and flavors exist -- hosts: localhost - - # Include variables that will be used by heat - vars_files: - - stack_params.yaml - - tasks: - # Check that custom images are available - - include: custom_image_check.yaml - with_items: - - "{{ openstack_master_image }}" - - "{{ openstack_infra_image }}" - - "{{ openstack_node_image }}" - - "{{ openstack_lb_image }}" - - "{{ openstack_etcd_image }}" - - "{{ openstack_dns_image }}" - loop_control: - loop_var: image - - # Check that custom flavors are available - - include: custom_flavor_check.yaml - with_items: - - "{{ master_flavor }}" - - "{{ infra_flavor }}" - - "{{ node_flavor }}" - - "{{ lb_flavor }}" - - "{{ etcd_flavor }}" - - "{{ dns_flavor }}" - loop_control: - loop_var: flavor + - name: Check dependencies and OpenStack prerequisites + include_role: + name: openshift_openstack + tasks_from: check-prerequisites.yml + + - name: Check network configuration + include_role: + name: openshift_openstack + tasks_from: net_vars_check.yaml diff --git a/playbooks/openstack/openshift-cluster/provision-openstack.yml b/playbooks/openstack/openshift-cluster/provision-openstack.yml deleted file mode 100644 index bf424676d..000000000 --- a/playbooks/openstack/openshift-cluster/provision-openstack.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- hosts: localhost - gather_facts: True - become: False - vars_files: - - stack_params.yaml - pre_tasks: - - include: pre_tasks.yml - roles: - - role: openstack-stack - - role: openstack-create-cinder-registry - when: - - cinder_hosted_registry_name is defined - - cinder_hosted_registry_size_gb is defined - - role: static_inventory - when: openstack_inventory|default('static') == 'static' - inventory_path: "{{ openstack_inventory_path|default(inventory_dir) }}" - private_ssh_key: "{{ openstack_private_ssh_key|default('') }}" - ssh_config_path: "{{ openstack_ssh_config_path|default('/tmp/ssh.config.openshift.ansible' + '.' 
+ stack_name) }}" - ssh_user: "{{ ansible_user }}" - -- name: Refresh Server inventory or exit to apply SSH config - hosts: localhost - connection: local - become: False - gather_facts: False - tasks: - - name: Exit to apply SSH config for a bastion - meta: end_play - when: openstack_use_bastion|default(False)|bool - - name: Refresh Server inventory - meta: refresh_inventory - -- include: post-provision-openstack.yml - when: not openstack_use_bastion|default(False)|bool diff --git a/playbooks/openstack/openshift-cluster/provision.yaml b/playbooks/openstack/openshift-cluster/provision.yaml deleted file mode 100644 index 474c9c803..000000000 --- a/playbooks/openstack/openshift-cluster/provision.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: "prerequisites.yml" - -- include: "provision-openstack.yml" diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml new file mode 100644 index 000000000..5b20d5720 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/provision.yml @@ -0,0 +1,37 @@ +--- +- name: Create the OpenStack resources for cluster installation + hosts: localhost + tasks: + - name: provision cluster + include_role: + name: openshift_openstack + tasks_from: provision.yml + +# NOTE(shadower): the (internal) DNS must be functional at this point!! +# That will have happened in provision.yml if nsupdate was configured. + +# TODO(shadower): consider splitting this up so people can stop here +# and configure their DNS if they have to. + +- name: Prepare the Nodes in the cluster for installation + hosts: cluster_hosts + become: true + # NOTE: The nodes may not be up yet, don't gather facts here. + # They'll be collected after `wait_for_connection`. + gather_facts: no + tasks: + - name: Wait for the nodes to come up + wait_for_connection: + + - name: Gather facts for the new nodes + setup: + + - name: Install dependencies + include_role: + name: openshift_openstack + tasks_from: node-packages.yml + + - name: Configure Node + include_role: + name: openshift_openstack + tasks_from: node-configuration.yml diff --git a/playbooks/openstack/openshift-cluster/provision_install.yml b/playbooks/openstack/openshift-cluster/provision_install.yml new file mode 100644 index 000000000..5d88c105f --- /dev/null +++ b/playbooks/openstack/openshift-cluster/provision_install.yml @@ -0,0 +1,9 @@ +--- +- name: Check the prerequisites for cluster provisioning in OpenStack + include: prerequisites.yml + +- name: Include the provision.yml playbook to create cluster + include: provision.yml + +- name: Include the install.yml playbook to install cluster + include: install.yml diff --git a/playbooks/openstack/openshift-cluster/scale-up.yaml b/playbooks/openstack/openshift-cluster/scale-up.yaml index 79fc09050..f99ff1349 100644 --- a/playbooks/openstack/openshift-cluster/scale-up.yaml +++ b/playbooks/openstack/openshift-cluster/scale-up.yaml @@ -41,21 +41,16 @@ openstack_num_nodes: "{{ oc_old_num_nodes | int + increment_by | int }}" # Run provision.yaml with higher number of nodes to create a new app-node VM -- include: provision.yaml +- include: provision.yml # Run config.yml to perform openshift installation -# Path to openshift-ansible can be customised: -# - the value of openshift_ansible_dir has to be an absolute path -# - the path cannot contain the '/' symbol at the end # Creating a new deployment by the full installation -- include: "{{ openshift_ansible_dir }}/playbooks/byo/config.yml" - vars: - openshift_ansible_dir:
../../../../openshift-ansible +- include: install.yml when: 'not groups["new_nodes"] | list' # Scaling up existing deployment -- include: "{{ openshift_ansible_dir }}/playbooks/byo/openshift-node/scaleup.yml" +- include: "../../byo/openshift-node/scaleup.yml" vars: openshift_ansible_dir: ../../../../openshift-ansible when: 'groups["new_nodes"] | list' diff --git a/playbooks/openstack/openshift-cluster/stack_params.yaml b/playbooks/openstack/openshift-cluster/stack_params.yaml deleted file mode 100644 index a4da31bfe..000000000 --- a/playbooks/openstack/openshift-cluster/stack_params.yaml +++ /dev/null @@ -1,49 +0,0 @@ ---- -stack_name: "{{ env_id }}.{{ public_dns_domain }}" -dns_domain: "{{ public_dns_domain }}" -dns_nameservers: "{{ public_dns_nameservers }}" -subnet_prefix: "{{ openstack_subnet_prefix }}" -master_hostname: "{{ openstack_master_hostname | default('master') }}" -infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" -node_hostname: "{{ openstack_node_hostname | default('app-node') }}" -lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" -etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" -dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" -ssh_public_key: "{{ openstack_ssh_public_key }}" -openstack_image: "{{ openstack_default_image_name }}" -lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" -etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" -master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" -node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" -infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" -dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" -openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" -openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" -openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" -openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" -openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" -openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" -openstack_private_network: >- - {% if openstack_provider_network_name | default(None) -%} - {{ openstack_provider_network_name }} - {%- else -%} - {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} - {%- endif -%} -provider_network: "{{ openstack_provider_network_name | default(None) }}" -external_network: "{{ openstack_external_network_name | default(None) }}" -num_etcd: "{{ openstack_num_etcd | default(0) }}" -num_masters: "{{ openstack_num_masters }}" -num_nodes: "{{ openstack_num_nodes }}" -num_infra: "{{ openstack_num_infra }}" -num_dns: "{{ openstack_num_dns | default(1) }}" -master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}" -infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" -master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" -infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" -node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" -etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}" 
-dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" -lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" -nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" -use_bastion: "{{ openstack_use_bastion|default(False) }}" -ui_ssh_tunnel: "{{ openshift_ui_ssh_tunnel|default(False) }}" diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py index 6a1b74b3d..47c56d94d 100755 --- a/playbooks/openstack/sample-inventory/inventory.py +++ b/playbooks/openstack/sample-inventory/inventory.py @@ -1,4 +1,11 @@ #!/usr/bin/env python +""" +This is an Ansible dynamic inventory for OpenStack. + +It requires your OpenStack credentials to be set in clouds.yaml or your shell +environment. + +""" from __future__ import print_function @@ -7,7 +14,8 @@ import json import shade -if __name__ == '__main__': +def build_inventory(): + '''Build the dynamic inventory.''' cloud = shade.openstack_cloud() inventory = {} @@ -39,13 +47,10 @@ if __name__ == '__main__': dns = [server.name for server in cluster_hosts if server.metadata['host-type'] == 'dns'] - lb = [server.name for server in cluster_hosts - if server.metadata['host-type'] == 'lb'] + load_balancers = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'lb'] - osev3 = list(set(nodes + etcd + lb)) - - groups = [server.metadata.group for server in cluster_hosts - if 'group' in server.metadata] + osev3 = list(set(nodes + etcd + load_balancers)) inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]} inventory['OSEv3'] = {'hosts': osev3} @@ -55,7 +60,7 @@ if __name__ == '__main__': inventory['infra_hosts'] = {'hosts': infra_hosts} inventory['app'] = {'hosts': app} inventory['dns'] = {'hosts': dns} - inventory['lb'] = {'hosts': lb} + inventory['lb'] = {'hosts': load_balancers} for server in cluster_hosts: if 'group' in server.metadata: @@ -68,21 +73,24 @@ if __name__ == '__main__': for server in cluster_hosts: ssh_ip_address = server.public_v4 or server.private_v4 - vars = { + hostvars = { 'ansible_host': ssh_ip_address } public_v4 = server.public_v4 or server.private_v4 if public_v4: - vars['public_v4'] = public_v4 + hostvars['public_v4'] = public_v4 # TODO(shadower): what about multiple networks? 
if server.private_v4: - vars['private_v4'] = server.private_v4 + hostvars['private_v4'] = server.private_v4 node_labels = server.metadata.get('node_labels') if node_labels: - vars['openshift_node_labels'] = node_labels + hostvars['openshift_node_labels'] = node_labels + + inventory['_meta']['hostvars'][server.name] = hostvars + return inventory - inventory['_meta']['hostvars'][server.name] = vars - print(json.dumps(inventory, indent=4, sort_keys=True)) +if __name__ == '__main__': + print(json.dumps(build_inventory(), indent=4, sort_keys=True)) diff --git a/requirements.txt b/requirements.txt index bf95b4ff9..3cdcff90e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,4 +7,5 @@ pyOpenSSL==16.2.0 # We need to disable ruamel.yaml for now because of test failures #ruamel.yaml six==1.10.0 +shade==1.24.0 passlib==1.6.5 diff --git a/roles/common/defaults/main.yml b/roles/common/defaults/main.yml deleted file mode 100644 index 8db591374..000000000 --- a/roles/common/defaults/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -openshift_cluster_node_labels: - app: - region: primary - infra: - region: infra diff --git a/roles/dns-records/defaults/main.yml b/roles/dns-records/defaults/main.yml deleted file mode 100644 index 3f7fa783f..000000000 --- a/roles/dns-records/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -use_bastion: False diff --git a/roles/dns-records/tasks/main.yml b/roles/dns-records/tasks/main.yml deleted file mode 100644 index 7148b016a..000000000 --- a/roles/dns-records/tasks/main.yml +++ /dev/null @@ -1,121 +0,0 @@ ---- -- name: "Generate list of private A records" - set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" - with_items: "{{ groups['cluster_hosts'] }}" - -- name: "Add wildcard records to the private A records for infrahosts" - set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" - with_items: "{{ groups['infra_hosts'] }}" - -- name: "Add public master cluster hostname records to the private A records (single master)" - set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" - when: - - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 - -- name: "Add public master cluster hostname records to the private A records (multi-master)" - set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" - when: - - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters > 1 - -- name: "Set the private DNS server to use the external value (if provided)" - set_fact: - nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" - nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" - nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" - nsupdate_private_key_name: "{{ external_nsupdate_keys['private']['key_name']|default('private-' + full_dns_domain) }}" - when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['private'] is defined - -- name: "Set the private DNS server to use the provisioned value" - set_fact: - nsupdate_server_private: "{{ hostvars[groups['dns'][0]].public_v4 }}" - nsupdate_key_secret_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_secret }}" - nsupdate_key_algorithm_private: "{{ hostvars[groups['dns'][0]].nsupdate_keys['private-' + full_dns_domain].key_algorithm }}" - when: - - nsupdate_server_private is undefined - -- name: "Generate the private Add section for DNS" - set_fact: - private_named_records: - - view: "private" - zone: "{{ full_dns_domain }}" - server: "{{ nsupdate_server_private }}" - key_name: "{{ nsupdate_private_key_name|default('private-' + full_dns_domain) }}" - key_secret: "{{ nsupdate_key_secret_private }}" - key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" - entries: "{{ private_records }}" - -- name: "Generate list of public A records" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" - with_items: "{{ groups['cluster_hosts'] }}" - when: hostvars[item]['public_v4'] is defined - -- name: "Add wildcard records to the public A records" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" - with_items: "{{ groups['infra_hosts'] }}" - when: hostvars[item]['public_v4'] is defined - -- name: "Add public master cluster hostname records to the public A records (single master)" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" - when: - - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 - - not use_bastion|bool - -- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" - when: - - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 - - use_bastion|bool - -- name: "Add public master cluster hostname records to the public A records (multi-master)" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" - when: - - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters > 1 - -- name: "Set the public DNS server details to use the external value (if provided)" - set_fact: - nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" - nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" - nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" - nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" - when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['public'] is defined - -- name: "Set the public DNS server details to use the provisioned value" - set_fact: - nsupdate_server_public: "{{ hostvars[groups['dns'][0]].public_v4 }}" - nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" - nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" - when: - - nsupdate_server_public is undefined - -- name: "Generate the public Add section for DNS" - set_fact: - public_named_records: - - view: "public" - zone: "{{ full_dns_domain }}" - server: "{{ nsupdate_server_public }}" - key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" - key_secret: "{{ nsupdate_key_secret_public }}" - key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" - entries: "{{ public_records }}" - -- name: "Generate the final dns_records_add" - set_fact: - dns_records_add: "{{ private_named_records + public_named_records }}" diff --git a/roles/dns-server-detect/defaults/main.yml b/roles/dns-server-detect/defaults/main.yml deleted file mode 100644 index 58bd861cd..000000000 --- a/roles/dns-server-detect/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- - -external_nsupdate_keys: {} diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml deleted 
file mode 100644 index cd775814f..000000000 --- a/roles/dns-server-detect/tasks/main.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -- fail: - msg: 'Missing required private DNS server(s)' - when: - - external_nsupdate_keys['private'] is undefined - - hostvars[groups['dns'][0]] is undefined - -- fail: - msg: 'Missing required public DNS server(s)' - when: - - external_nsupdate_keys['public'] is undefined - - hostvars[groups['dns'][0]] is undefined - -- name: "Set the private DNS server to use the external value (if provided)" - set_fact: - private_dns_server: "{{ external_nsupdate_keys['private']['server'] }}" - when: - - external_nsupdate_keys['private'] is defined - -- name: "Set the private DNS server to use the provisioned value" - set_fact: - private_dns_server: "{{ hostvars[groups['dns'][0]].private_v4 }}" - when: - - private_dns_server is undefined - -- name: "Set the public DNS server to use the external value (if provided)" - set_fact: - public_dns_server: "{{ external_nsupdate_keys['public']['server'] }}" - when: - - external_nsupdate_keys['public'] is defined - -- name: "Set the public DNS server to use the provisioned value" - set_fact: - public_dns_server: "{{ hostvars[groups['dns'][0]].public_v4 }}" - when: - - public_dns_server is undefined diff --git a/roles/dns-views/defaults/main.yml b/roles/dns-views/defaults/main.yml deleted file mode 100644 index c9f8248af..000000000 --- a/roles/dns-views/defaults/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -external_nsupdate_keys: {} -named_private_recursion: 'yes' -named_public_recursion: 'no' diff --git a/roles/dns-views/tasks/main.yml b/roles/dns-views/tasks/main.yml deleted file mode 100644 index ffbad2e3f..000000000 --- a/roles/dns-views/tasks/main.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -- name: "Generate ACL list for DNS server" - set_fact: - acl_list: "{{ acl_list | default([]) + [ (hostvars[item]['private_v4'] + '/32') ] }}" - with_items: "{{ groups['cluster_hosts'] }}" - -- name: "Generate the private view" - set_fact: - private_named_view: - - name: "private" - recursion: "{{ named_private_recursion }}" - acl_entry: "{{ acl_list }}" - zone: - - dns_domain: "{{ full_dns_domain }}" - forwarder: "{{ public_dns_nameservers }}" - when: external_nsupdate_keys['private'] is undefined - -- name: "Generate the public view" - set_fact: - public_named_view: - - name: "public" - recursion: "{{ named_public_recursion }}" - zone: - - dns_domain: "{{ full_dns_domain }}" - forwarder: "{{ public_dns_nameservers }}" - when: external_nsupdate_keys['public'] is undefined - -- name: "Generate the final named_config_views" - set_fact: - named_config_views: "{{ private_named_view|default([]) + public_named_view|default([]) }}" diff --git a/roles/docker-storage-setup/defaults/main.yaml b/roles/docker-storage-setup/defaults/main.yaml deleted file mode 100644 index 062f543ad..000000000 --- a/roles/docker-storage-setup/defaults/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -docker_dev: "/dev/sdb" -docker_vg: "docker-vol" -docker_data_size: "95%VG" -docker_dm_basesize: "3G" -container_root_lv_name: "dockerlv" -container_root_lv_mount_path: "/var/lib/docker" diff --git a/roles/docker-storage-setup/tasks/main.yaml b/roles/docker-storage-setup/tasks/main.yaml deleted file mode 100644 index 8606eeba4..000000000 --- a/roles/docker-storage-setup/tasks/main.yaml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- name: stop docker - service: name=docker state=stopped - -- block: - - name: create the docker-storage config file - template: - src: "{{ role_path 
}}/templates/docker-storage-setup-overlayfs.j2" - dest: /etc/sysconfig/docker-storage-setup - owner: root - group: root - mode: 0644 - when: - - ansible_distribution_version | version_compare('7.4', '>=') - - ansible_distribution == "RedHat" - -- block: - - name: create the docker-storage-setup config file - template: - src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" - dest: /etc/sysconfig/docker-storage-setup - owner: root - group: root - mode: 0644 - when: - - ansible_distribution_version | version_compare('7.4', '<') - - ansible_distribution == "RedHat" - -- block: - - name: create the docker-storage-setup config file for CentOS - template: - src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" - dest: /etc/sysconfig/docker-storage-setup - owner: root - group: root - mode: 0644 - - # TODO(shadower): Find out which CentOS version supports overlayfs2 - when: - - ansible_distribution == "CentOS" - -- name: Install Docker - package: name=docker state=present - -- name: start docker - service: name=docker state=restarted enabled=true diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 b/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 deleted file mode 100644 index b5869feff..000000000 --- a/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 +++ /dev/null @@ -1,4 +0,0 @@ -DEVS="{{ docker_dev }}" -VG="{{ docker_vg }}" -DATA_SIZE="{{ docker_data_size }}" -EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" diff --git a/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 b/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 deleted file mode 100644 index d8b4a0276..000000000 --- a/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 +++ /dev/null @@ -1,7 +0,0 @@ -DEVS="{{ docker_dev }}" -VG="{{ docker_vg }}" -DATA_SIZE="{{ docker_data_size }}" -STORAGE_DRIVER=overlay2 -CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}" -CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}" -CONTAINER_ROOT_LV_SIZE=100%FREE diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml deleted file mode 100644 index c49852210..000000000 --- a/roles/hostnames/tasks/main.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Setting Hostname Fact - set_fact: - new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" - -- name: Setting FQDN Fact - set_fact: - new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" - -- name: Setting hostname and DNS domain - hostname: name="{{ new_fqdn }}" - -- name: Check for cloud.cfg - stat: path=/etc/cloud/cloud.cfg - register: cloud_cfg - -- name: Prevent cloud-init updates of hostname/fqdn (if applicable) - lineinfile: - dest: /etc/cloud/cloud.cfg - state: present - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - with_items: - - { regexp: '^ - set_hostname', line: '# - set_hostname' } - - { regexp: '^ - update_hostname', line: '# - update_hostname' } - when: cloud_cfg.stat.exists == True diff --git a/roles/hostnames/test/inv b/roles/hostnames/test/inv deleted file mode 100644 index ffbe6e03d..000000000 --- a/roles/hostnames/test/inv +++ /dev/null @@ -1,12 +0,0 @@ -[all:vars] -dns_domain=example.com - -[openshift_masters] -192.168.124.41 dns_private_ip=1.1.1.41 dns_public_ip=192.168.124.41 -192.168.124.117 dns_private_ip=1.1.1.117 dns_public_ip=192.168.124.117 - -[openshift_nodes] -192.168.124.40 dns_private_ip=1.1.1.40 dns_public_ip=192.168.124.40 - -#[dns] -#192.168.124.117 
dns_private_ip=1.1.1.117 diff --git a/roles/hostnames/test/roles b/roles/hostnames/test/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/roles/hostnames/test/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/roles/hostnames/test/test.yaml b/roles/hostnames/test/test.yaml deleted file mode 100644 index 0c56aea51..000000000 --- a/roles/hostnames/test/test.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- hosts: all - roles: - - role: hostnames diff --git a/roles/hostnames/vars/main.yaml b/roles/hostnames/vars/main.yaml deleted file mode 100644 index 3eecb8dc4..000000000 --- a/roles/hostnames/vars/main.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -counter: 1 diff --git a/roles/hostnames/vars/records.yaml b/roles/hostnames/vars/records.yaml deleted file mode 100644 index 0cadc8181..000000000 --- a/roles/hostnames/vars/records.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: "Building Records" - set_fact: - dns_records_add: - - view: private - zone: example.com - entries: - - type: A - hostname: master1.example.com - ip: 172.16.15.94 - - type: A - hostname: node1.example.com - ip: 172.16.15.86 - - type: A - hostname: node2.example.com - ip: 172.16.15.87 - - view: public - zone: example.com - entries: - - type: A - hostname: master1.example.com - ip: 10.3.10.116 - - type: A - hostname: node1.example.com - ip: 10.3.11.46 - - type: A - hostname: node2.example.com - ip: 10.3.12.6 diff --git a/roles/node-network-manager/tasks/main.yml b/roles/node-network-manager/tasks/main.yml deleted file mode 100644 index 6a17855e7..000000000 --- a/roles/node-network-manager/tasks/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: install NetworkManager - package: - name: NetworkManager - state: present - -- name: configure NetworkManager - lineinfile: - dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" - regexp: '^{{ item }}=' - line: '{{ item }}=yes' - state: present - create: yes - with_items: - - 'USE_PEERDNS' - - 'NM_CONTROLLED' - -- name: enable and start NetworkManager - service: - name: NetworkManager - state: restarted - enabled: yes diff --git a/roles/openshift-prep/defaults/main.yml b/roles/openshift-prep/defaults/main.yml deleted file mode 100644 index c8c9a00c0..000000000 --- a/roles/openshift-prep/defaults/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Defines either to install required packages and update all -manage_packages: true -install_debug_packages: false -required_packages: - - wget - - git - - net-tools - - bind-utils - - bridge-utils -debug_packages: - - bash-completion - - vim-enhanced diff --git a/roles/openshift-prep/tasks/main.yml b/roles/openshift-prep/tasks/main.yml deleted file mode 100644 index 5e484e75f..000000000 --- a/roles/openshift-prep/tasks/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -# Starting Point for OpenShift Installation and Configuration -- include: prerequisites.yml - tags: [prerequisites] diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml deleted file mode 100644 index b7601aa48..000000000 --- a/roles/openshift-prep/tasks/prerequisites.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- name: "Cleaning yum repositories" - command: "yum clean all" - -- name: "Install required packages" - yum: - name: "{{ item }}" - state: latest - with_items: "{{ required_packages }}" - when: manage_packages|bool - -- name: "Install debug packages (optional)" - yum: - name: "{{ item }}" - state: latest - with_items: "{{ debug_packages }}" - when: 
install_debug_packages|bool - -- name: "Update all packages (this can take a very long time)" - yum: - name: '*' - state: latest - when: manage_packages|bool - -- name: "Verify hostname" - shell: hostnamectl status | awk "/Static hostname/"'{ print $3 }' - register: hostname_fqdn - -- name: "Set hostname if required" - hostname: - name: "{{ ansible_fqdn }}" - when: hostname_fqdn.stdout != ansible_fqdn - -- name: "Verify SELinux is enforcing" - fail: - msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" - when: ansible_selinux.config_mode != "enforcing" diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml new file mode 100644 index 000000000..05f1c0911 --- /dev/null +++ b/roles/openshift_openstack/defaults/main.yml @@ -0,0 +1,49 @@ +--- + +stack_state: 'present' + +ssh_ingress_cidr: 0.0.0.0/0 +node_ingress_cidr: 0.0.0.0/0 +master_ingress_cidr: 0.0.0.0/0 +lb_ingress_cidr: 0.0.0.0/0 +bastion_ingress_cidr: 0.0.0.0/0 +num_etcd: 0 +num_masters: 1 +num_nodes: 1 +num_dns: 1 +num_infra: 1 +nodes_to_remove: [] +etcd_volume_size: 2 +dns_volume_size: 1 +lb_volume_size: 5 +use_bastion: False +ui_ssh_tunnel: False +provider_network: False + + +openshift_cluster_node_labels: + app: + region: primary + infra: + region: infra + +install_debug_packages: false +required_packages: + - docker + - NetworkManager + - wget + - git + - net-tools + - bind-utils + - bridge-utils +debug_packages: + - bash-completion + - vim-enhanced + +# container-storage-setup +docker_dev: "/dev/sdb" +docker_vg: "docker-vol" +docker_data_size: "95%VG" +docker_dm_basesize: "3G" +container_root_lv_name: "dockerlv" +container_root_lv_mount_path: "/var/lib/docker" diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml new file mode 100644 index 000000000..4d7cfbf11 --- /dev/null +++ b/roles/openshift_openstack/tasks/check-prerequisites.yml @@ -0,0 +1,109 @@ +--- +# Check ansible +- name: Check Ansible version + assert: + that: > + (ansible_version.major == 2 and ansible_version.minor >= 3) or + (ansible_version.major > 2) + msg: "Ansible version must be at least 2.3" + +# Check shade +- name: Try to import python module shade + command: python -c "import shade" + ignore_errors: yes + register: shade_result +- name: Check if shade is installed + assert: + that: 'shade_result.rc == 0' + msg: "Python module shade is not installed" + +# Check jmespath +- name: Try to import python module shade + command: python -c "import jmespath" + ignore_errors: yes + register: jmespath_result +- name: Check if jmespath is installed + assert: + that: 'jmespath_result.rc == 0' + msg: "Python module jmespath is not installed" + +# Check python-dns +- name: Try to import python DNS module + command: python -c "import dns" + ignore_errors: yes + register: pythondns_result +- name: Check if python-dns is installed + assert: + that: 'pythondns_result.rc == 0' + msg: "Python module python-dns is not installed" + +# Check jinja2 +- name: Try to import jinja2 module + command: python -c "import jinja2" + ignore_errors: yes + register: jinja_result +- name: Check if jinja2 is installed + assert: + that: 'jinja_result.rc == 0' + msg: "Python module jinja2 is not installed" + +# Check Glance image +- name: Try to get image facts + os_image_facts: + image: "{{ openstack_default_image_name }}" + register: image_result +- name: Check that image is available + assert: + that: 
"image_result.ansible_facts.openstack_image" + msg: "Image {{ openstack_default_image_name }} is not available" + +# Check network name +- name: Try to get network facts + os_networks_facts: + name: "{{ openstack_external_network_name }}" + register: network_result + when: not openstack_provider_network_name|default(None) +- name: Check that network is available + assert: + that: "network_result.ansible_facts.openstack_networks" + msg: "Network {{ openstack_external_network_name }} is not available" + when: not openstack_provider_network_name|default(None) + +# Check keypair +# TODO kpilatov: there is no Ansible module for getting OS keypairs +# (os_keypair is not suitable for this) +# this method does not force python-openstackclient dependency +- name: Try to show keypair + command: > + python -c 'import shade; cloud = shade.openstack_cloud(); + exit(cloud.get_keypair("{{ openstack_ssh_public_key }}") is None)' + ignore_errors: yes + register: key_result +- name: Check that keypair is available + assert: + that: 'key_result.rc == 0' + msg: "Keypair {{ openstack_ssh_public_key }} is not available" + +# Check that custom images are available +- include: custom_image_check.yaml + with_items: + - "{{ openstack_master_image }}" + - "{{ openstack_infra_image }}" + - "{{ openstack_node_image }}" + - "{{ openstack_lb_image }}" + - "{{ openstack_etcd_image }}" + - "{{ openstack_dns_image }}" + loop_control: + loop_var: image + +# Check that custom flavors are available +- include: custom_flavor_check.yaml + with_items: + - "{{ master_flavor }}" + - "{{ infra_flavor }}" + - "{{ node_flavor }}" + - "{{ lb_flavor }}" + - "{{ etcd_flavor }}" + - "{{ dns_flavor }}" + loop_control: + loop_var: flavor diff --git a/roles/openshift_openstack/tasks/cleanup.yml b/roles/openshift_openstack/tasks/cleanup.yml new file mode 100644 index 000000000..258334a6b --- /dev/null +++ b/roles/openshift_openstack/tasks/cleanup.yml @@ -0,0 +1,6 @@ +--- + +- name: cleanup temp files + file: + path: "{{ stack_template_pre.path }}" + state: absent diff --git a/roles/openshift_openstack/tasks/container-storage-setup.yml b/roles/openshift_openstack/tasks/container-storage-setup.yml new file mode 100644 index 000000000..5cd48ca2c --- /dev/null +++ b/roles/openshift_openstack/tasks/container-storage-setup.yml @@ -0,0 +1,37 @@ +--- +- block: + - name: create the docker-storage config file + template: + src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + when: + - ansible_distribution_version | version_compare('7.4', '>=') + - ansible_distribution == "RedHat" + +- block: + - name: create the docker-storage-setup config file + template: + src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + when: + - ansible_distribution_version | version_compare('7.4', '<') + - ansible_distribution == "RedHat" + +- block: + - name: create the docker-storage-setup config file for CentOS + template: + src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0644 + + # TODO(shadower): Find out which CentOS version supports overlayfs2 + when: + - ansible_distribution == "CentOS" diff --git a/roles/openshift_openstack/tasks/custom_flavor_check.yaml b/roles/openshift_openstack/tasks/custom_flavor_check.yaml new file mode 100644 index 000000000..e11874c28 --- /dev/null +++ 
b/roles/openshift_openstack/tasks/custom_flavor_check.yaml @@ -0,0 +1,9 @@ +--- +- name: Try to get flavor facts + os_flavor_facts: + name: "{{ flavor }}" + register: flavor_result +- name: Check that custom flavor is available + assert: + that: "flavor_result.ansible_facts.openstack_flavors" + msg: "Flavor {{ flavor }} is not available." diff --git a/roles/openshift_openstack/tasks/custom_image_check.yaml b/roles/openshift_openstack/tasks/custom_image_check.yaml new file mode 100644 index 000000000..4fbd6a687 --- /dev/null +++ b/roles/openshift_openstack/tasks/custom_image_check.yaml @@ -0,0 +1,10 @@ +--- +- name: Try to get image facts + os_image_facts: + image: "{{ image }}" + register: image_result + +- name: Check that custom image is available + assert: + that: "image_result.ansible_facts.openstack_image" + msg: "Image {{ image }} is not available." diff --git a/roles/openshift_openstack/tasks/generate-templates.yml b/roles/openshift_openstack/tasks/generate-templates.yml new file mode 100644 index 000000000..0ff50a095 --- /dev/null +++ b/roles/openshift_openstack/tasks/generate-templates.yml @@ -0,0 +1,26 @@ +--- +- name: create HOT stack template prefix + register: stack_template_pre + tempfile: + state: directory + prefix: openshift-ansible + +- name: set template paths + set_fact: + stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" + user_data_template_path: "{{ stack_template_pre.path }}/user-data" + +- name: generate HOT stack template from jinja2 template + template: + src: heat_stack.yaml.j2 + dest: "{{ stack_template_path }}" + +- name: generate HOT server template from jinja2 template + template: + src: heat_stack_server.yaml.j2 + dest: "{{ stack_template_pre.path }}/server.yaml" + +- name: generate user_data from jinja2 template + template: + src: user_data.j2 + dest: "{{ user_data_template_path }}" diff --git a/roles/openshift_openstack/tasks/hostname.yml b/roles/openshift_openstack/tasks/hostname.yml new file mode 100644 index 000000000..0fc8fbc4c --- /dev/null +++ b/roles/openshift_openstack/tasks/hostname.yml @@ -0,0 +1,33 @@ +--- +- name: "Verify hostname" + command: hostnamectl status --static + register: hostname_fqdn + +- name: "Set hostname if required" + when: hostname_fqdn.stdout != ansible_fqdn + block: + - name: Setting Hostname Fact + set_fact: + new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" + + - name: Setting FQDN Fact + set_fact: + new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" + + - name: Setting hostname and DNS domain + hostname: name="{{ new_fqdn }}" + + - name: Check for cloud.cfg + stat: path=/etc/cloud/cloud.cfg + register: cloud_cfg + + - name: Prevent cloud-init updates of hostname/fqdn (if applicable) + lineinfile: + dest: /etc/cloud/cloud.cfg + state: present + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ - set_hostname', line: '# - set_hostname' } + - { regexp: '^ - update_hostname', line: '# - update_hostname' } + when: cloud_cfg.stat.exists == True diff --git a/roles/openshift_openstack/tasks/net_vars_check.yaml b/roles/openshift_openstack/tasks/net_vars_check.yaml new file mode 100644 index 000000000..68afde415 --- /dev/null +++ b/roles/openshift_openstack/tasks/net_vars_check.yaml @@ -0,0 +1,14 @@ +--- +- name: Check the provider network configuration + fail: + msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network" + when: + - openstack_provider_network_name is defined + - 
openstack_private_data_network_name is defined + +- name: Check the flannel network configuration + fail: + msg: "A dedicated containers data network is only supported with Flannel SDN" + when: + - openstack_private_data_network_name is defined + - not openshift_use_flannel|default(False)|bool diff --git a/roles/openshift_openstack/tasks/node-configuration.yml b/roles/openshift_openstack/tasks/node-configuration.yml new file mode 100644 index 000000000..8a6a8022f --- /dev/null +++ b/roles/openshift_openstack/tasks/node-configuration.yml @@ -0,0 +1,11 @@ +--- +- include: hostname.yml + +- include: container-storage-setup.yml + +- include: node-network.yml + +- name: "Verify SELinux is enforcing" + fail: + msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" + when: ansible_selinux.config_mode != "enforcing" diff --git a/roles/openshift_openstack/tasks/node-network.yml b/roles/openshift_openstack/tasks/node-network.yml new file mode 100644 index 000000000..f494e5158 --- /dev/null +++ b/roles/openshift_openstack/tasks/node-network.yml @@ -0,0 +1,19 @@ +--- +- name: configure NetworkManager + lineinfile: + dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" + regexp: '^{{ item }}=' + line: '{{ item }}=yes' + state: present + create: yes + with_items: + - 'USE_PEERDNS' + - 'NM_CONTROLLED' + +- name: enable and start NetworkManager + service: + name: NetworkManager + state: restarted + enabled: yes + +# TODO(shadower): add the flannel interface tasks from post-provision-openstack.yml diff --git a/roles/openshift_openstack/tasks/node-packages.yml b/roles/openshift_openstack/tasks/node-packages.yml new file mode 100644 index 000000000..c65eaec3b --- /dev/null +++ b/roles/openshift_openstack/tasks/node-packages.yml @@ -0,0 +1,15 @@ +--- +# TODO: subscribe to RHEL and install docker and other packages here + +- name: Install required packages + yum: + name: "{{ item }}" + state: latest + with_items: "{{ required_packages }}" + +- name: Install debug packages (optional) + yum: + name: "{{ item }}" + state: latest + with_items: "{{ debug_packages }}" + when: install_debug_packages|bool diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml new file mode 100644 index 000000000..f1a868a19 --- /dev/null +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -0,0 +1,5 @@ +# TODO: use nsupdate to populate the DNS servers using the keys +# specified in the inventory. + +# this is an optional step -- the deployers may do whatever else they +# wish here. 
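The populate-dns.yml task file added above is deliberately left as a TODO. As a rough illustration only, a minimal nsupdate-based sketch could look like the following; it assumes a `dns_records_add` variable shaped like the one the removed dns-records role used to build (a list of views, each carrying the DNS server, TSIG key details, and a list of A-record entries), and it is not part of the patch series itself:

```
---
# Hypothetical sketch -- not part of these patches.
# Assumes dns_records_add follows the structure the removed dns-records
# role used to build: a list of views with server/key details and a
# list of {type, hostname, ip} entries.
- name: Add records to the DNS server via nsupdate
  nsupdate:
    server: "{{ item.0.server }}"
    zone: "{{ item.0.zone }}"
    key_name: "{{ item.0.key_name }}"
    key_secret: "{{ item.0.key_secret }}"
    key_algorithm: "{{ item.0.key_algorithm }}"
    record: "{{ item.1.hostname }}"
    type: "{{ item.1.type }}"
    value: "{{ item.1.ip }}"
    state: present
  with_subelements:
    - "{{ dns_records_add | default([]) }}"
    - entries
```

As the TODO comments note, this step is optional and deployers may populate their DNS records by whatever other means they prefer.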
diff --git a/roles/openshift_openstack/tasks/prepare-and-format-cinder-volume.yaml b/roles/openshift_openstack/tasks/prepare-and-format-cinder-volume.yaml new file mode 100644 index 000000000..fc51f6dc2 --- /dev/null +++ b/roles/openshift_openstack/tasks/prepare-and-format-cinder-volume.yaml @@ -0,0 +1,59 @@ +--- +- name: Attach the volume to the VM + os_server_volume: + state: present + server: "{{ groups['masters'][0] }}" + volume: "{{ cinder_volume }}" + register: volume_attachment + +- set_fact: + attached_device: >- + {{ volume_attachment['attachments']|json_query("[?volume_id=='" + cinder_volume + "'].device | [0]") }} + +- delegate_to: "{{ groups['masters'][0] }}" + block: + - name: Wait for the device to appear + wait_for: path={{ attached_device }} + + - name: Create a temp directory for mounting the volume + tempfile: + prefix: cinder-volume + state: directory + register: cinder_mount_dir + + - name: Format the device + filesystem: + fstype: "{{ cinder_fs }}" + dev: "{{ attached_device }}" + + - name: Mount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ attached_device }}" + state: mounted + fstype: "{{ cinder_fs }}" + + - name: Change mode on the filesystem + file: + path: "{{ cinder_mount_dir.path }}" + state: directory + recurse: true + mode: 0777 + + - name: Unmount the device + mount: + name: "{{ cinder_mount_dir.path }}" + src: "{{ attached_device }}" + state: absent + fstype: "{{ cinder_fs }}" + + - name: Delete the temp directory + file: + name: "{{ cinder_mount_dir.path }}" + state: absent + +- name: Detach the volume from the VM + os_server_volume: + state: absent + server: "{{ groups['masters'][0] }}" + volume: "{{ cinder_volume }}" diff --git a/roles/openshift_openstack/tasks/provision.yml b/roles/openshift_openstack/tasks/provision.yml new file mode 100644 index 000000000..8ebda8100 --- /dev/null +++ b/roles/openshift_openstack/tasks/provision.yml @@ -0,0 +1,30 @@ +--- +- name: Generate the templates + include: generate-templates.yml + when: + - stack_state == 'present' + +- name: Handle the Stack (create/delete) + ignore_errors: False + register: stack_create + os_stack: + name: "{{ stack_name }}" + state: "{{ stack_state }}" + template: "{{ stack_template_path | default(omit) }}" + wait: yes + +- name: Add the new nodes to the inventory + meta: refresh_inventory + +- name: Populate DNS entries + include: populate-dns.yml + when: + - stack_state == 'present' + +- name: CleanUp + include: cleanup.yml + when: + - stack_state == 'present' + +# TODO(shadower): create the registry and PV Cinder volumes if specified +# and include the `prepare-and-format-cinder-volume` tasks to set it up diff --git a/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml b/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml new file mode 100644 index 000000000..af28fc98f --- /dev/null +++ b/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml @@ -0,0 +1,9 @@ +--- +- name: Live update the subnet's DNS servers + os_subnet: + name: openshift-ansible-{{ stack_name }}-subnet + network_name: openshift-ansible-{{ stack_name }}-net + state: present + use_default_subnetpool: yes + dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}" + when: not provider_network diff --git a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 new file mode 100644 index 000000000..b5869feff --- /dev/null +++ 
b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 @@ -0,0 +1,4 @@ +DEVS="{{ docker_dev }}" +VG="{{ docker_vg }}" +DATA_SIZE="{{ docker_data_size }}" +EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" diff --git a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 new file mode 100644 index 000000000..d8b4a0276 --- /dev/null +++ b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 @@ -0,0 +1,7 @@ +DEVS="{{ docker_dev }}" +VG="{{ docker_vg }}" +DATA_SIZE="{{ docker_data_size }}" +STORAGE_DRIVER=overlay2 +CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}" +CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}" +CONTAINER_ROOT_LV_SIZE=100%FREE diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 new file mode 100644 index 000000000..2359842a5 --- /dev/null +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -0,0 +1,888 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster + +parameters: + +outputs: + + etcd_names: + description: Name of the etcds + value: { get_attr: [ etcd, name ] } + + etcd_ips: + description: IPs of the etcds + value: { get_attr: [ etcd, private_ip ] } + + etcd_floating_ips: + description: Floating IPs of the etcds + value: { get_attr: [ etcd, floating_ip ] } + + master_names: + description: Name of the masters + value: { get_attr: [ masters, name ] } + + master_ips: + description: IPs of the masters + value: { get_attr: [ masters, private_ip ] } + + master_floating_ips: + description: Floating IPs of the masters + value: { get_attr: [ masters, floating_ip ] } + + node_names: + description: Name of the nodes + value: { get_attr: [ compute_nodes, name ] } + + node_ips: + description: IPs of the nodes + value: { get_attr: [ compute_nodes, private_ip ] } + + node_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ compute_nodes, floating_ip ] } + + infra_names: + description: Name of the nodes + value: { get_attr: [ infra_nodes, name ] } + + infra_ips: + description: IPs of the nodes + value: { get_attr: [ infra_nodes, private_ip ] } + + infra_floating_ips: + description: Floating IPs of the nodes + value: { get_attr: [ infra_nodes, floating_ip ] } + +{% if num_dns|int > 0 %} + dns_name: + description: Name of the DNS + value: + get_attr: + - dns + - name + + dns_floating_ips: + description: Floating IPs of the DNS + value: { get_attr: [ dns, floating_ip ] } + + dns_private_ips: + description: Private IPs of the DNS + value: { get_attr: [ dns, private_ip ] } +{% endif %} + +conditions: + no_floating: {% if provider_network or use_bastion|bool %}true{% else %}false{% endif %} + +resources: + +{% if not provider_network %} + net: + type: OS::Neutron::Net + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} + + subnet: + type: OS::Neutron::Subnet + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-subnet + params: + cluster_id: {{ stack_name }} + network: { get_resource: net } + cidr: + str_replace: + template: subnet_24_prefix.0/24 + params: + subnet_24_prefix: {{ subnet_prefix }} + allocation_pools: + - start: + str_replace: + template: subnet_24_prefix.3 + params: + subnet_24_prefix: {{ subnet_prefix }} + end: + str_replace: + template: subnet_24_prefix.254 + params: + subnet_24_prefix: {{ 
subnet_prefix }} + dns_nameservers: +{% for nameserver in dns_nameservers %} + - {{ nameserver }} +{% endfor %} + +{% if openshift_use_flannel|default(False)|bool %} + data_net: + type: OS::Neutron::Net + properties: + name: openshift-ansible-{{ stack_name }}-data-net + port_security_enabled: false + + data_subnet: + type: OS::Neutron::Subnet + properties: + name: openshift-ansible-{{ stack_name }}-data-subnet + network: { get_resource: data_net } + cidr: {{ osm_cluster_network_cidr|default('10.128.0.0/14') }} + gateway_ip: null +{% endif %} + + router: + type: OS::Neutron::Router + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-router + params: + cluster_id: {{ stack_name }} + external_gateway_info: + network: {{ external_network }} + + interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: subnet } + +{% endif %} + +# keypair: +# type: OS::Nova::KeyPair +# properties: +# name: +# str_replace: +# template: openshift-ansible-cluster_id-keypair +# params: +# cluster_id: {{ stack_name }} +# public_key: {{ ssh_public_key }} + + common-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-common-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Basic ssh/icmp security group for cluster_id OpenShift cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ ssh_ingress_cidr }} +{% if use_bastion|bool %} + - direction: ingress + protocol: tcp + port_range_min: 22 + port_range_max: 22 + remote_ip_prefix: {{ bastion_ingress_cidr }} +{% endif %} + - direction: ingress + protocol: icmp + remote_ip_prefix: {{ ssh_ingress_cidr }} + +{% if openstack_flat_secgrp|default(False)|bool %} + flat-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-flat-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port|default(8443) }} + port_range_max: {{ openshift_master_api_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port|default(8443) }} + port_range_max: {{ openshift_master_console_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2380 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10250 
+ port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" +{% else %} + master-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-master-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster master + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 4001 + port_range_max: 4001 + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port|default(8443) }} + port_range_max: {{ openshift_master_api_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port|default(8443) }} + port_range_max: {{ openshift_master_console_port|default(8443) }} + - direction: ingress + protocol: tcp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: udp + port_range_min: 8053 + port_range_max: 8053 + - direction: ingress + protocol: tcp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: udp + port_range_min: 24224 + port_range_max: 24224 + - direction: ingress + protocol: tcp + port_range_min: 2224 + port_range_max: 2224 + - direction: ingress + protocol: udp + port_range_min: 5404 + port_range_max: 5405 + - direction: ingress + protocol: tcp + port_range_min: 9090 + port_range_max: 9090 +{% if openshift_use_flannel|default(False)|bool %} + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 +{% endif %} + + etcd-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-etcd-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id etcd cluster + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + remote_mode: remote_group_id + remote_group_id: { get_resource: master-secgrp } + - direction: ingress + protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + remote_mode: remote_group_id + + node-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-node-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 10250 + port_range_max: 10250 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 10255 + port_range_max: 10255 + remote_mode: remote_group_id + - direction: ingress + protocol: udp + port_range_min: 4789 + 
port_range_max: 4789 + remote_mode: remote_group_id + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" +{% endif %} + + infra-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-infra-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id OpenShift infrastructure cluster nodes + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: tcp + port_range_min: 80 + port_range_max: 80 + - direction: ingress + protocol: tcp + port_range_min: 443 + port_range_max: 443 + +{% if num_dns|int > 0 %} + dns-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: + str_replace: + template: openshift-ansible-cluster_id-dns-secgrp + params: + cluster_id: {{ stack_name }} + description: + str_replace: + template: Security group for cluster_id cluster DNS + params: + cluster_id: {{ stack_name }} + rules: + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: udp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: {{ node_ingress_cidr }} + - direction: ingress + protocol: tcp + port_range_min: 53 + port_range_max: 53 + remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" +{% endif %} + +{% if num_masters|int > 1 or ui_ssh_tunnel|bool %} + lb-secgrp: + type: OS::Neutron::SecurityGroup + properties: + name: openshift-ansible-{{ stack_name }}-lb-secgrp + description: Security group for {{ stack_name }} cluster Load Balancer + rules: + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} +{% if ui_ssh_tunnel|bool %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_api_port | default(8443) }} + port_range_max: {{ openshift_master_api_port | default(8443) }} + remote_ip_prefix: {{ ssh_ingress_cidr }} +{% endif %} +{% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} + - direction: ingress + protocol: tcp + port_range_min: {{ openshift_master_console_port | default(8443) }} + port_range_max: {{ openshift_master_console_port | default(8443) }} + remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} +{% endif %} +{% endif %} + + etcd: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_etcd }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: {{ etcd_hostname | default('etcd') }} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: etcds + cluster_id: {{ stack_name }} + type: etcd + image: {{ openstack_etcd_image | default(openstack_image) }} + flavor: {{ etcd_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ 
provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + - { get_resource: common-secgrp } + floating_network: + if: + - no_floating + - null + - {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false +{% endif %} + volume_size: {{ etcd_volume_size }} +{% if not provider_network %} + depends_on: + - interface +{% endif %} + +{% if master_server_group_policies|length > 0 %} + master_server_group: + type: OS::Nova::ServerGroup + properties: + name: master_server_group + policies: {{ master_server_group_policies }} +{% endif %} +{% if infra_server_group_policies|length > 0 %} + infra_server_group: + type: OS::Nova::ServerGroup + properties: + name: infra_server_group + policies: {{ infra_server_group_policies }} +{% endif %} +{% if num_masters|int > 1 %} + loadbalancer: + type: OS::Heat::ResourceGroup + properties: + count: 1 + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: {{ lb_hostname | default('lb') }} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: lb + cluster_id: {{ stack_name }} + type: lb + image: {{ openstack_lb_image | default(openstack_image) }} + flavor: {{ lb_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: lb-secgrp } + - { get_resource: common-secgrp } +{% if not provider_network %} + floating_network: {{ external_network }} +{% endif %} + volume_size: {{ lb_volume_size }} +{% if not provider_network %} + depends_on: + - interface +{% endif %} +{% endif %} + + masters: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_masters }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: {{ master_hostname | default('master')}} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: masters + cluster_id: {{ stack_name }} + type: master + image: {{ openstack_master_image | default(openstack_image) }} + flavor: {{ master_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} +{% endif %} + secgrp: +{% if openstack_flat_secgrp|default(False)|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: master-secgrp } + - { get_resource: node-secgrp } +{% if num_etcd|int == 0 
%} + - { get_resource: etcd-secgrp } +{% endif %} +{% endif %} + - { get_resource: common-secgrp } + floating_network: + if: + - no_floating + - null + - {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false +{% endif %} + volume_size: {{ master_volume_size }} +{% if master_server_group_policies|length > 0 %} + scheduler_hints: + group: { get_resource: master_server_group } +{% endif %} +{% if not provider_network %} + depends_on: + - interface +{% endif %} + + compute_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_nodes }} + removal_policies: + - resource_list: {{ nodes_to_remove }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: sub_type_k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + sub_type_k8s_type: {{ node_hostname | default('app-node') }} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: nodes + cluster_id: {{ stack_name }} + type: node + subtype: app + node_labels: +{% for k, v in openshift_cluster_node_labels.app.iteritems() %} + {{ k|e }}: {{ v|e }} +{% endfor %} + image: {{ openstack_node_image | default(openstack_image) }} + flavor: {{ node_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} +{% endif %} + secgrp: + - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + - { get_resource: common-secgrp } + floating_network: + if: + - no_floating + - null + - {{ external_network }} +{% if use_bastion|bool or provider_network %} + attach_float_net: false +{% endif %} + volume_size: {{ node_volume_size }} +{% if not provider_network %} + depends_on: + - interface +{% endif %} + + infra_nodes: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_infra }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: sub_type_k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + sub_type_k8s_type: {{ infra_hostname | default('infranode') }} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: infra + cluster_id: {{ stack_name }} + type: node + subtype: infra + node_labels: +{% for k, v in openshift_cluster_node_labels.infra.iteritems() %} + {{ k|e }}: {{ v|e }} +{% endfor %} + image: {{ openstack_infra_image | default(openstack_image) }} + flavor: {{ infra_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: true + data_net: { get_resource: data_net } + data_subnet: { get_resource: data_subnet } +{% endif %} +{% endif %} + secgrp: +# TODO(bogdando) filter only required node rules into 
infra-secgrp +{% if openstack_flat_secgrp|default(False)|bool %} + - { get_resource: flat-secgrp } +{% else %} + - { get_resource: node-secgrp } +{% endif %} +{% if ui_ssh_tunnel|bool and num_masters|int < 2 %} + - { get_resource: lb-secgrp } +{% endif %} + - { get_resource: infra-secgrp } + - { get_resource: common-secgrp } +{% if not provider_network %} + floating_network: {{ external_network }} +{% endif %} + volume_size: {{ infra_volume_size }} +{% if infra_server_group_policies|length > 0 %} + scheduler_hints: + group: { get_resource: infra_server_group } +{% endif %} +{% if not provider_network %} + depends_on: + - interface +{% endif %} + +{% if num_dns|int > 0 %} + dns: + type: OS::Heat::ResourceGroup + properties: + count: {{ num_dns }} + resource_def: + type: server.yaml + properties: + name: + str_replace: + template: k8s_type-%index%.cluster_id + params: + cluster_id: {{ stack_name }} + k8s_type: {{ dns_hostname | default('dns') }} + cluster_env: {{ public_dns_domain }} + cluster_id: {{ stack_name }} + group: + str_replace: + template: k8s_type.cluster_id + params: + k8s_type: dns + cluster_id: {{ stack_name }} + type: dns + image: {{ openstack_dns_image | default(openstack_image) }} + flavor: {{ dns_flavor }} + key_name: {{ ssh_public_key }} +{% if provider_network %} + net: {{ provider_network }} + net_name: {{ provider_network }} +{% else %} + net: { get_resource: net } + subnet: { get_resource: subnet } + net_name: + str_replace: + template: openshift-ansible-cluster_id-net + params: + cluster_id: {{ stack_name }} +{% endif %} + secgrp: + - { get_resource: dns-secgrp } + - { get_resource: common-secgrp } +{% if not provider_network %} + floating_network: {{ external_network }} +{% endif %} + volume_size: {{ dns_volume_size }} +{% if not provider_network %} + depends_on: + - interface +{% endif %} +{% endif %} diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 new file mode 100644 index 000000000..9ffe721a5 --- /dev/null +++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 @@ -0,0 +1,270 @@ +heat_template_version: 2016-10-14 + +description: OpenShift cluster server + +parameters: + + name: + type: string + label: Name + description: Name + + group: + type: string + label: Host Group + description: The Primary Ansible Host Group + default: host + + cluster_env: + type: string + label: Cluster environment + description: Environment of the cluster + + cluster_id: + type: string + label: Cluster ID + description: Identifier of the cluster + + type: + type: string + label: Type + description: Type master or node + + subtype: + type: string + label: Sub-type + description: Sub-type compute or infra for nodes, default otherwise + default: default + + key_name: + type: string + label: Key name + description: Key name of keypair + + image: + type: string + label: Image + description: Name of the image + + flavor: + type: string + label: Flavor + description: Name of the flavor + + net: + type: string + label: Net ID + description: Net resource + + net_name: + type: string + label: Net name + description: Net name + +{% if not provider_network %} + subnet: + type: string + label: Subnet ID + description: Subnet resource +{% endif %} + +{% if openshift_use_flannel|default(False)|bool %} + attach_data_net: + type: boolean + default: false + label: Attach-data-net + description: A switch for data port connection + + data_net: + type: string + default: '' + label: Net ID + description: 
Net resource + +{% if not provider_network %} + data_subnet: + type: string + default: '' + label: Subnet ID + description: Subnet resource +{% endif %} +{% endif %} + + secgrp: + type: comma_delimited_list + label: Security groups + description: Security group resources + + attach_float_net: + type: boolean + default: true + + label: Attach-float-net + description: A switch for floating network port connection + +{% if not provider_network %} + floating_network: + type: string + default: '' + label: Floating network + description: Network to allocate floating IP from +{% endif %} + + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + + volume_size: + type: number + description: Size of the volume to be created. + default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + + node_labels: + type: json + description: OpenShift Node Labels + default: {"region": "default" } + + scheduler_hints: + type: json + description: Server scheduler hints. + default: {} + +outputs: + + name: + description: Name of the server + value: { get_attr: [ server, name ] } + + private_ip: + description: Private IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } + - 0 + - addr + + floating_ip: + description: Floating IP of the server + value: + get_attr: + - server + - addresses + - { get_param: net_name } +{% if provider_network %} + - 0 +{% else %} + - 1 +{% endif %} + - addr + +conditions: + no_floating: {not: { get_param: attach_float_net} } +{% if openshift_use_flannel|default(False)|bool %} + no_data_subnet: {not: { get_param: attach_data_net} } +{% endif %} + +resources: + + server: + type: OS::Nova::Server + properties: + name: { get_param: name } + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: +{% if openshift_use_flannel|default(False)|bool %} + if: + - no_data_subnet +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } +{% endif %} +{% if use_trunk_ports|default(false)|bool %} + - - port: { get_attr: [trunk-port, port_id] } +{% else %} + - - port: { get_resource: port } + - port: { get_resource: data_port } +{% endif %} + +{% else %} +{% if use_trunk_ports|default(false)|bool %} + - port: { get_attr: [trunk-port, port_id] } +{% else %} + - port: { get_resource: port } +{% endif %} +{% endif %} + user_data: + get_file: user-data + user_data_format: RAW + user_data_update_policy: IGNORE + metadata: + group: { get_param: group } + environment: { get_param: cluster_env } + clusterid: { get_param: cluster_id } + host-type: { get_param: type } + sub-host-type: { get_param: subtype } + node_labels: { get_param: node_labels } + scheduler_hints: { get_param: scheduler_hints } + +{% if use_trunk_ports|default(false)|bool %} + trunk-port: + type: OS::Neutron::Trunk + properties: + name: { get_param: name } + port: { get_resource: port } +{% endif %} + + port: + type: OS::Neutron::Port + properties: + network: { get_param: net } +{% if not provider_network %} + fixed_ips: + - subnet: { get_param: subnet } +{% endif %} + security_groups: { get_param: secgrp } + +{% if openshift_use_flannel|default(False)|bool %} + data_port: + type: OS::Neutron::Port + condition: { not: no_data_subnet } + properties: + network: { get_param: data_net } + port_security_enabled: false +{% if not provider_network %} + fixed_ips: + - subnet: { 
get_param: data_subnet } +{% endif %} +{% endif %} + +{% if not provider_network %} + floating-ip: + condition: { not: no_floating } + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: floating_network } + port_id: { get_resource: port } +{% endif %} + +{% if not ephemeral_volumes|default(false)|bool %} + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server } + mountpoint: /dev/sdb +{% endif %} diff --git a/roles/openshift_openstack/templates/user_data.j2 b/roles/openshift_openstack/templates/user_data.j2 new file mode 100644 index 000000000..eb65f7cec --- /dev/null +++ b/roles/openshift_openstack/templates/user_data.j2 @@ -0,0 +1,13 @@ +#cloud-config +disable_root: true + +system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + +write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + content: | + Defaults:openshift !requiretty diff --git a/roles/openshift_openstack/vars/main.yml b/roles/openshift_openstack/vars/main.yml new file mode 100644 index 000000000..a4da31bfe --- /dev/null +++ b/roles/openshift_openstack/vars/main.yml @@ -0,0 +1,49 @@ +--- +stack_name: "{{ env_id }}.{{ public_dns_domain }}" +dns_domain: "{{ public_dns_domain }}" +dns_nameservers: "{{ public_dns_nameservers }}" +subnet_prefix: "{{ openstack_subnet_prefix }}" +master_hostname: "{{ openstack_master_hostname | default('master') }}" +infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" +node_hostname: "{{ openstack_node_hostname | default('app-node') }}" +lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" +etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" +dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" +ssh_public_key: "{{ openstack_ssh_public_key }}" +openstack_image: "{{ openstack_default_image_name }}" +lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" +etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" +master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" +node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" +infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" +dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" +openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" +openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" +openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" +openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" +openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" +openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" +openstack_private_network: >- + {% if openstack_provider_network_name | default(None) -%} + {{ openstack_provider_network_name }} + {%- else -%} + {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} + {%- endif -%} +provider_network: "{{ openstack_provider_network_name | default(None) }}" +external_network: "{{ openstack_external_network_name | 
default(None) }}" +num_etcd: "{{ openstack_num_etcd | default(0) }}" +num_masters: "{{ openstack_num_masters }}" +num_nodes: "{{ openstack_num_nodes }}" +num_infra: "{{ openstack_num_infra }}" +num_dns: "{{ openstack_num_dns | default(1) }}" +master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}" +infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" +master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" +infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" +node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" +etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}" +dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" +lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" +nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" +use_bastion: "{{ openstack_use_bastion|default(False) }}" +ui_ssh_tunnel: "{{ openshift_ui_ssh_tunnel|default(False) }}" diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml index 983567026..0348f53ce 100644 --- a/roles/openstack-stack/tasks/main.yml +++ b/roles/openstack-stack/tasks/main.yml @@ -1,5 +1,4 @@ --- - - name: Generate the templates include: generate-templates.yml when: diff --git a/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml b/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml deleted file mode 100644 index af28fc98f..000000000 --- a/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Live update the subnet's DNS servers - os_subnet: - name: openshift-ansible-{{ stack_name }}-subnet - network_name: openshift-ansible-{{ stack_name }}-net - state: present - use_default_subnetpool: yes - dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}" - when: not provider_network -- cgit v1.2.3 From 63fb0c74fcb0adf4cd3b0b2b5d30e34e29a58796 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 27 Oct 2017 17:27:51 +0200 Subject: Remove the extra roles The `openstack-stack` role is now under `openshift_openstack` and the `openstack-create-cinder-registry` one will be added there, later. 
--- .../tasks/main.yaml | 5 - roles/openstack-stack/README.md | 9 - roles/openstack-stack/defaults/main.yml | 21 - roles/openstack-stack/meta/main.yml | 3 - roles/openstack-stack/tasks/cleanup.yml | 6 - roles/openstack-stack/tasks/generate-templates.yml | 26 - roles/openstack-stack/tasks/main.yml | 26 - roles/openstack-stack/templates/heat_stack.yaml.j2 | 888 --------------------- .../templates/heat_stack_server.yaml.j2 | 270 ------- roles/openstack-stack/templates/user_data.j2 | 13 - roles/openstack-stack/test/roles | 1 - roles/openstack-stack/test/stack-create-test.yml | 18 - roles/static_inventory/defaults/main.yml | 29 - roles/static_inventory/meta/main.yml | 3 - roles/static_inventory/tasks/checkpoint.yml | 17 - .../tasks/filter_out_new_app_nodes.yaml | 15 - roles/static_inventory/tasks/main.yml | 25 - roles/static_inventory/tasks/openstack.yml | 120 --- roles/static_inventory/tasks/sshconfig.yml | 13 - roles/static_inventory/tasks/sshtun.yml | 15 - roles/static_inventory/templates/inventory.j2 | 104 --- .../templates/openstack_ssh_config.j2 | 21 - .../templates/ssh-tunnel.service.j2 | 20 - 23 files changed, 1668 deletions(-) delete mode 100644 roles/openstack-create-cinder-registry/tasks/main.yaml delete mode 100644 roles/openstack-stack/README.md delete mode 100644 roles/openstack-stack/defaults/main.yml delete mode 100644 roles/openstack-stack/meta/main.yml delete mode 100644 roles/openstack-stack/tasks/cleanup.yml delete mode 100644 roles/openstack-stack/tasks/generate-templates.yml delete mode 100644 roles/openstack-stack/tasks/main.yml delete mode 100644 roles/openstack-stack/templates/heat_stack.yaml.j2 delete mode 100644 roles/openstack-stack/templates/heat_stack_server.yaml.j2 delete mode 100644 roles/openstack-stack/templates/user_data.j2 delete mode 120000 roles/openstack-stack/test/roles delete mode 100644 roles/openstack-stack/test/stack-create-test.yml delete mode 100644 roles/static_inventory/defaults/main.yml delete mode 100644 roles/static_inventory/meta/main.yml delete mode 100644 roles/static_inventory/tasks/checkpoint.yml delete mode 100644 roles/static_inventory/tasks/filter_out_new_app_nodes.yaml delete mode 100644 roles/static_inventory/tasks/main.yml delete mode 100644 roles/static_inventory/tasks/openstack.yml delete mode 100644 roles/static_inventory/tasks/sshconfig.yml delete mode 100644 roles/static_inventory/tasks/sshtun.yml delete mode 100644 roles/static_inventory/templates/inventory.j2 delete mode 100644 roles/static_inventory/templates/openstack_ssh_config.j2 delete mode 100644 roles/static_inventory/templates/ssh-tunnel.service.j2 diff --git a/roles/openstack-create-cinder-registry/tasks/main.yaml b/roles/openstack-create-cinder-registry/tasks/main.yaml deleted file mode 100644 index 6e9d1c2e7..000000000 --- a/roles/openstack-create-cinder-registry/tasks/main.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- os_volume: - display_name: "{{ cinder_hosted_registry_name }}" - size: "{{ cinder_hosted_registry_size_gb }}" - register: cinder_registry_volume diff --git a/roles/openstack-stack/README.md b/roles/openstack-stack/README.md deleted file mode 100644 index 32a2b49f1..000000000 --- a/roles/openstack-stack/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Role openstack-stack - -Role for spinning up instances using OpenStack Heat. 
- -## To Test - -``` -ansible-playbook openshift-ansible-contrib/roles/openstack-stack/test/stack-create-test.yml -``` diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml deleted file mode 100644 index a24e684cc..000000000 --- a/roles/openstack-stack/defaults/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- - -stack_state: 'present' - -ssh_ingress_cidr: 0.0.0.0/0 -node_ingress_cidr: 0.0.0.0/0 -master_ingress_cidr: 0.0.0.0/0 -lb_ingress_cidr: 0.0.0.0/0 -bastion_ingress_cidr: 0.0.0.0/0 -num_etcd: 0 -num_masters: 1 -num_nodes: 1 -num_dns: 1 -num_infra: 1 -nodes_to_remove: [] -etcd_volume_size: 2 -dns_volume_size: 1 -lb_volume_size: 5 -use_bastion: False -ui_ssh_tunnel: False -provider_network: False diff --git a/roles/openstack-stack/meta/main.yml b/roles/openstack-stack/meta/main.yml deleted file mode 100644 index fdda41bb3..000000000 --- a/roles/openstack-stack/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - role: common diff --git a/roles/openstack-stack/tasks/cleanup.yml b/roles/openstack-stack/tasks/cleanup.yml deleted file mode 100644 index 258334a6b..000000000 --- a/roles/openstack-stack/tasks/cleanup.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- - -- name: cleanup temp files - file: - path: "{{ stack_template_pre.path }}" - state: absent diff --git a/roles/openstack-stack/tasks/generate-templates.yml b/roles/openstack-stack/tasks/generate-templates.yml deleted file mode 100644 index 0ff50a095..000000000 --- a/roles/openstack-stack/tasks/generate-templates.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: create HOT stack template prefix - register: stack_template_pre - tempfile: - state: directory - prefix: openshift-ansible - -- name: set template paths - set_fact: - stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" - user_data_template_path: "{{ stack_template_pre.path }}/user-data" - -- name: generate HOT stack template from jinja2 template - template: - src: heat_stack.yaml.j2 - dest: "{{ stack_template_path }}" - -- name: generate HOT server template from jinja2 template - template: - src: heat_stack_server.yaml.j2 - dest: "{{ stack_template_pre.path }}/server.yaml" - -- name: generate user_data from jinja2 template - template: - src: user_data.j2 - dest: "{{ user_data_template_path }}" diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml deleted file mode 100644 index 0348f53ce..000000000 --- a/roles/openstack-stack/tasks/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Generate the templates - include: generate-templates.yml - when: - - stack_state == 'present' - -- name: Handle the Stack (create/delete) - ignore_errors: False - register: stack_create - os_stack: - name: "{{ stack_name }}" - state: "{{ stack_state }}" - template: "{{ stack_template_path | default(omit) }}" - wait: yes - -# NOTE(bogdando) OS::Neutron::Subnet doesn't support live updates for -# dns_nameservers, so we can't do that for the "create stack" task. 
-- include: subnet_update_dns_servers.yaml - when: - - private_dns_server is defined - - stack_state == 'present' - -- name: CleanUp - include: cleanup.yml - when: - - stack_state == 'present' diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2 deleted file mode 100644 index 2359842a5..000000000 --- a/roles/openstack-stack/templates/heat_stack.yaml.j2 +++ /dev/null @@ -1,888 +0,0 @@ -heat_template_version: 2016-10-14 - -description: OpenShift cluster - -parameters: - -outputs: - - etcd_names: - description: Name of the etcds - value: { get_attr: [ etcd, name ] } - - etcd_ips: - description: IPs of the etcds - value: { get_attr: [ etcd, private_ip ] } - - etcd_floating_ips: - description: Floating IPs of the etcds - value: { get_attr: [ etcd, floating_ip ] } - - master_names: - description: Name of the masters - value: { get_attr: [ masters, name ] } - - master_ips: - description: IPs of the masters - value: { get_attr: [ masters, private_ip ] } - - master_floating_ips: - description: Floating IPs of the masters - value: { get_attr: [ masters, floating_ip ] } - - node_names: - description: Name of the nodes - value: { get_attr: [ compute_nodes, name ] } - - node_ips: - description: IPs of the nodes - value: { get_attr: [ compute_nodes, private_ip ] } - - node_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ compute_nodes, floating_ip ] } - - infra_names: - description: Name of the nodes - value: { get_attr: [ infra_nodes, name ] } - - infra_ips: - description: IPs of the nodes - value: { get_attr: [ infra_nodes, private_ip ] } - - infra_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ infra_nodes, floating_ip ] } - -{% if num_dns|int > 0 %} - dns_name: - description: Name of the DNS - value: - get_attr: - - dns - - name - - dns_floating_ips: - description: Floating IPs of the DNS - value: { get_attr: [ dns, floating_ip ] } - - dns_private_ips: - description: Private IPs of the DNS - value: { get_attr: [ dns, private_ip ] } -{% endif %} - -conditions: - no_floating: {% if provider_network or use_bastion|bool %}true{% else %}false{% endif %} - -resources: - -{% if not provider_network %} - net: - type: OS::Neutron::Net - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} - - subnet: - type: OS::Neutron::Subnet - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-subnet - params: - cluster_id: {{ stack_name }} - network: { get_resource: net } - cidr: - str_replace: - template: subnet_24_prefix.0/24 - params: - subnet_24_prefix: {{ subnet_prefix }} - allocation_pools: - - start: - str_replace: - template: subnet_24_prefix.3 - params: - subnet_24_prefix: {{ subnet_prefix }} - end: - str_replace: - template: subnet_24_prefix.254 - params: - subnet_24_prefix: {{ subnet_prefix }} - dns_nameservers: -{% for nameserver in dns_nameservers %} - - {{ nameserver }} -{% endfor %} - -{% if openshift_use_flannel|default(False)|bool %} - data_net: - type: OS::Neutron::Net - properties: - name: openshift-ansible-{{ stack_name }}-data-net - port_security_enabled: false - - data_subnet: - type: OS::Neutron::Subnet - properties: - name: openshift-ansible-{{ stack_name }}-data-subnet - network: { get_resource: data_net } - cidr: {{ osm_cluster_network_cidr|default('10.128.0.0/14') }} - gateway_ip: null -{% endif %} - - router: - type: OS::Neutron::Router - properties: - name: - str_replace: - 
template: openshift-ansible-cluster_id-router - params: - cluster_id: {{ stack_name }} - external_gateway_info: - network: {{ external_network }} - - interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: subnet } - -{% endif %} - -# keypair: -# type: OS::Nova::KeyPair -# properties: -# name: -# str_replace: -# template: openshift-ansible-cluster_id-keypair -# params: -# cluster_id: {{ stack_name }} -# public_key: {{ ssh_public_key }} - - common-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-common-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Basic ssh/icmp security group for cluster_id OpenShift cluster - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} -{% if use_bastion|bool %} - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ bastion_ingress_cidr }} -{% endif %} - - direction: ingress - protocol: icmp - remote_ip_prefix: {{ ssh_ingress_cidr }} - -{% if openstack_flat_secgrp|default(False)|bool %} - flat-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-flat-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift cluster - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_api_port|default(8443) }} - port_range_max: {{ openshift_master_api_port|default(8443) }} - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_console_port|default(8443) }} - port_range_max: {{ openshift_master_console_port|default(8443) }} - - direction: ingress - protocol: tcp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: udp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: tcp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: tcp - port_range_min: 2224 - port_range_max: 2224 - - direction: ingress - protocol: udp - port_range_min: 5404 - port_range_max: 5405 - - direction: ingress - protocol: tcp - port_range_min: 9090 - port_range_max: 9090 - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2380 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - 
remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% else %} - master-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-master-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift cluster master - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_api_port|default(8443) }} - port_range_max: {{ openshift_master_api_port|default(8443) }} - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_console_port|default(8443) }} - port_range_max: {{ openshift_master_console_port|default(8443) }} - - direction: ingress - protocol: tcp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: udp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: tcp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: tcp - port_range_min: 2224 - port_range_max: 2224 - - direction: ingress - protocol: udp - port_range_min: 5404 - port_range_max: 5405 - - direction: ingress - protocol: tcp - port_range_min: 9090 - port_range_max: 9090 -{% if openshift_use_flannel|default(False)|bool %} - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 -{% endif %} - - etcd-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-etcd-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id etcd cluster - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - remote_mode: remote_group_id - remote_group_id: { get_resource: master-secgrp } - - direction: ingress - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - remote_mode: remote_group_id - - node-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-node-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift cluster nodes - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% endif %} - - infra-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-infra-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id OpenShift infrastructure cluster 
nodes - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - direction: ingress - protocol: tcp - port_range_min: 443 - port_range_max: 443 - -{% if num_dns|int > 0 %} - dns-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-dns-secgrp - params: - cluster_id: {{ stack_name }} - description: - str_replace: - template: Security group for cluster_id cluster DNS - params: - cluster_id: {{ stack_name }} - rules: - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: {{ node_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" -{% endif %} - -{% if num_masters|int > 1 or ui_ssh_tunnel|bool %} - lb-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: openshift-ansible-{{ stack_name }}-lb-secgrp - description: Security group for {{ stack_name }} cluster Load Balancer - rules: - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_api_port | default(8443) }} - port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} -{% if ui_ssh_tunnel|bool %} - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_api_port | default(8443) }} - port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ ssh_ingress_cidr }} -{% endif %} -{% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_console_port | default(8443) }} - port_range_max: {{ openshift_master_console_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} -{% endif %} -{% endif %} - - etcd: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_etcd }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: {{ etcd_hostname | default('etcd') }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: etcds - cluster_id: {{ stack_name }} - type: etcd - image: {{ openstack_etcd_image | default(openstack_image) }} - flavor: {{ etcd_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% endif %} - secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - - { get_resource: common-secgrp } - floating_network: - if: - - no_floating - - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} - attach_float_net: false -{% endif %} - volume_size: {{ etcd_volume_size }} -{% if not provider_network %} - depends_on: 
- - interface -{% endif %} - -{% if master_server_group_policies|length > 0 %} - master_server_group: - type: OS::Nova::ServerGroup - properties: - name: master_server_group - policies: {{ master_server_group_policies }} -{% endif %} -{% if infra_server_group_policies|length > 0 %} - infra_server_group: - type: OS::Nova::ServerGroup - properties: - name: infra_server_group - policies: {{ infra_server_group_policies }} -{% endif %} -{% if num_masters|int > 1 %} - loadbalancer: - type: OS::Heat::ResourceGroup - properties: - count: 1 - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: {{ lb_hostname | default('lb') }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: lb - cluster_id: {{ stack_name }} - type: lb - image: {{ openstack_lb_image | default(openstack_image) }} - flavor: {{ lb_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% endif %} - secgrp: - - { get_resource: lb-secgrp } - - { get_resource: common-secgrp } -{% if not provider_network %} - floating_network: {{ external_network }} -{% endif %} - volume_size: {{ lb_volume_size }} -{% if not provider_network %} - depends_on: - - interface -{% endif %} -{% endif %} - - masters: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_masters }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: {{ master_hostname | default('master')}} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: masters - cluster_id: {{ stack_name }} - type: master - image: {{ openstack_master_image | default(openstack_image) }} - flavor: {{ master_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% if openshift_use_flannel|default(False)|bool %} - attach_data_net: true - data_net: { get_resource: data_net } - data_subnet: { get_resource: data_subnet } -{% endif %} -{% endif %} - secgrp: -{% if openstack_flat_secgrp|default(False)|bool %} - - { get_resource: flat-secgrp } -{% else %} - - { get_resource: master-secgrp } - - { get_resource: node-secgrp } -{% if num_etcd|int == 0 %} - - { get_resource: etcd-secgrp } -{% endif %} -{% endif %} - - { get_resource: common-secgrp } - floating_network: - if: - - no_floating - - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} - attach_float_net: false -{% endif %} - volume_size: {{ master_volume_size }} -{% if master_server_group_policies|length > 0 %} - scheduler_hints: - group: { get_resource: master_server_group } -{% endif %} -{% if not provider_network %} - depends_on: - - interface -{% endif %} - - compute_nodes: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_nodes }} - removal_policies: - - 
resource_list: {{ nodes_to_remove }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: sub_type_k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ node_hostname | default('app-node') }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: nodes - cluster_id: {{ stack_name }} - type: node - subtype: app - node_labels: -{% for k, v in openshift_cluster_node_labels.app.iteritems() %} - {{ k|e }}: {{ v|e }} -{% endfor %} - image: {{ openstack_node_image | default(openstack_image) }} - flavor: {{ node_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% if openshift_use_flannel|default(False)|bool %} - attach_data_net: true - data_net: { get_resource: data_net } - data_subnet: { get_resource: data_subnet } -{% endif %} -{% endif %} - secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - - { get_resource: common-secgrp } - floating_network: - if: - - no_floating - - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} - attach_float_net: false -{% endif %} - volume_size: {{ node_volume_size }} -{% if not provider_network %} - depends_on: - - interface -{% endif %} - - infra_nodes: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_infra }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: sub_type_k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ infra_hostname | default('infranode') }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: infra - cluster_id: {{ stack_name }} - type: node - subtype: infra - node_labels: -{% for k, v in openshift_cluster_node_labels.infra.iteritems() %} - {{ k|e }}: {{ v|e }} -{% endfor %} - image: {{ openstack_infra_image | default(openstack_image) }} - flavor: {{ infra_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% if openshift_use_flannel|default(False)|bool %} - attach_data_net: true - data_net: { get_resource: data_net } - data_subnet: { get_resource: data_subnet } -{% endif %} -{% endif %} - secgrp: -# TODO(bogdando) filter only required node rules into infra-secgrp -{% if openstack_flat_secgrp|default(False)|bool %} - - { get_resource: flat-secgrp } -{% else %} - - { get_resource: node-secgrp } -{% endif %} -{% if ui_ssh_tunnel|bool and num_masters|int < 2 %} - - { get_resource: lb-secgrp } -{% endif %} - - { get_resource: infra-secgrp } - - { get_resource: common-secgrp } -{% if not provider_network %} - floating_network: {{ external_network }} -{% endif %} - volume_size: {{ infra_volume_size }} -{% if infra_server_group_policies|length > 0 %} - scheduler_hints: - group: { get_resource: infra_server_group } -{% endif %} -{% if not provider_network %} - depends_on: - 
- interface -{% endif %} - -{% if num_dns|int > 0 %} - dns: - type: OS::Heat::ResourceGroup - properties: - count: {{ num_dns }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ stack_name }} - k8s_type: {{ dns_hostname | default('dns') }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: dns - cluster_id: {{ stack_name }} - type: dns - image: {{ openstack_dns_image | default(openstack_image) }} - flavor: {{ dns_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ stack_name }} -{% endif %} - secgrp: - - { get_resource: dns-secgrp } - - { get_resource: common-secgrp } -{% if not provider_network %} - floating_network: {{ external_network }} -{% endif %} - volume_size: {{ dns_volume_size }} -{% if not provider_network %} - depends_on: - - interface -{% endif %} -{% endif %} diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2 deleted file mode 100644 index 9ffe721a5..000000000 --- a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 +++ /dev/null @@ -1,270 +0,0 @@ -heat_template_version: 2016-10-14 - -description: OpenShift cluster server - -parameters: - - name: - type: string - label: Name - description: Name - - group: - type: string - label: Host Group - description: The Primary Ansible Host Group - default: host - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - type: - type: string - label: Type - description: Type master or node - - subtype: - type: string - label: Sub-type - description: Sub-type compute or infra for nodes, default otherwise - default: default - - key_name: - type: string - label: Key name - description: Key name of keypair - - image: - type: string - label: Image - description: Name of the image - - flavor: - type: string - label: Flavor - description: Name of the flavor - - net: - type: string - label: Net ID - description: Net resource - - net_name: - type: string - label: Net name - description: Net name - -{% if not provider_network %} - subnet: - type: string - label: Subnet ID - description: Subnet resource -{% endif %} - -{% if openshift_use_flannel|default(False)|bool %} - attach_data_net: - type: boolean - default: false - label: Attach-data-net - description: A switch for data port connection - - data_net: - type: string - default: '' - label: Net ID - description: Net resource - -{% if not provider_network %} - data_subnet: - type: string - default: '' - label: Subnet ID - description: Subnet resource -{% endif %} -{% endif %} - - secgrp: - type: comma_delimited_list - label: Security groups - description: Security group resources - - attach_float_net: - type: boolean - default: true - - label: Attach-float-net - description: A switch for floating network port connection - -{% if not provider_network %} - floating_network: - type: string - default: '' - label: Floating network - description: Network to allocate floating IP from -{% endif %} - - availability_zone: - type: string - 
description: The Availability Zone to launch the instance. - default: nova - - volume_size: - type: number - description: Size of the volume to be created. - default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - - node_labels: - type: json - description: OpenShift Node Labels - default: {"region": "default" } - - scheduler_hints: - type: json - description: Server scheduler hints. - default: {} - -outputs: - - name: - description: Name of the server - value: { get_attr: [ server, name ] } - - private_ip: - description: Private IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 0 - - addr - - floating_ip: - description: Floating IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } -{% if provider_network %} - - 0 -{% else %} - - 1 -{% endif %} - - addr - -conditions: - no_floating: {not: { get_param: attach_float_net} } -{% if openshift_use_flannel|default(False)|bool %} - no_data_subnet: {not: { get_param: attach_data_net} } -{% endif %} - -resources: - - server: - type: OS::Nova::Server - properties: - name: { get_param: name } - key_name: { get_param: key_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: -{% if openshift_use_flannel|default(False)|bool %} - if: - - no_data_subnet -{% if use_trunk_ports|default(false)|bool %} - - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - - port: { get_resource: port } -{% endif %} -{% if use_trunk_ports|default(false)|bool %} - - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - - port: { get_resource: port } - - port: { get_resource: data_port } -{% endif %} - -{% else %} -{% if use_trunk_ports|default(false)|bool %} - - port: { get_attr: [trunk-port, port_id] } -{% else %} - - port: { get_resource: port } -{% endif %} -{% endif %} - user_data: - get_file: user-data - user_data_format: RAW - user_data_update_policy: IGNORE - metadata: - group: { get_param: group } - environment: { get_param: cluster_env } - clusterid: { get_param: cluster_id } - host-type: { get_param: type } - sub-host-type: { get_param: subtype } - node_labels: { get_param: node_labels } - scheduler_hints: { get_param: scheduler_hints } - -{% if use_trunk_ports|default(false)|bool %} - trunk-port: - type: OS::Neutron::Trunk - properties: - name: { get_param: name } - port: { get_resource: port } -{% endif %} - - port: - type: OS::Neutron::Port - properties: - network: { get_param: net } -{% if not provider_network %} - fixed_ips: - - subnet: { get_param: subnet } -{% endif %} - security_groups: { get_param: secgrp } - -{% if openshift_use_flannel|default(False)|bool %} - data_port: - type: OS::Neutron::Port - condition: { not: no_data_subnet } - properties: - network: { get_param: data_net } - port_security_enabled: false -{% if not provider_network %} - fixed_ips: - - subnet: { get_param: data_subnet } -{% endif %} -{% endif %} - -{% if not provider_network %} - floating-ip: - condition: { not: no_floating } - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: floating_network } - port_id: { get_resource: port } -{% endif %} - -{% if not ephemeral_volumes|default(false)|bool %} - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { 
get_resource: server } - mountpoint: /dev/sdb -{% endif %} diff --git a/roles/openstack-stack/templates/user_data.j2 b/roles/openstack-stack/templates/user_data.j2 deleted file mode 100644 index eb65f7cec..000000000 --- a/roles/openstack-stack/templates/user_data.j2 +++ /dev/null @@ -1,13 +0,0 @@ -#cloud-config -disable_root: true - -system_info: - default_user: - name: openshift - sudo: ["ALL=(ALL) NOPASSWD: ALL"] - -write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - content: | - Defaults:openshift !requiretty diff --git a/roles/openstack-stack/test/roles b/roles/openstack-stack/test/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/roles/openstack-stack/test/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml deleted file mode 100644 index d80472193..000000000 --- a/roles/openstack-stack/test/stack-create-test.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- hosts: localhost - gather_facts: True - become: False - roles: - - role: openstack-stack - stack_name: test-stack - dns_domain: "{{ public_dns_domain }}" - dns_nameservers: "{{ public_dns_nameservers }}" - subnet_prefix: "{{ openstack_subnet_prefix }}" - ssh_public_key: "{{ openstack_ssh_public_key }}" - openstack_image: "{{ openstack_default_image_name }}" - etcd_flavor: "{{ openstack_default_flavor }}" - master_flavor: "{{ openstack_default_flavor }}" - node_flavor: "{{ openstack_default_flavor }}" - infra_flavor: "{{ openstack_default_flavor }}" - dns_flavor: "{{ openstack_default_flavor }}" - external_network: "{{ openstack_external_network_name }}" diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml deleted file mode 100644 index 871700f8c..000000000 --- a/roles/static_inventory/defaults/main.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# Either to checkpoint the dynamic inventory into a static one -refresh_inventory: True -inventory: static -inventory_path: ~/openstack-inventory - -# Either to configure bastion -use_bastion: true - -# SSH user/key/options to access hosts via bastion -ssh_user: openshift -ssh_options: >- - -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no - -o ConnectTimeout=90 -o ControlMaster=auto -o ControlPersist=270s - -o ServerAliveInterval=30 -o GSSAPIAuthentication=no - -# SSH key to access nodes -private_ssh_key: ~/.ssh/openshift - -# The patch to store the generated config to access bastion/hosts -ssh_config_path: /tmp/ssh.config.ansible - -# The IP:port to make an SSH tunnel to access UI on the 1st master -# via bastion node (requires sudo on the ansible control node) -ui_ssh_tunnel: False -ui_port: "{{ openshift_master_api_port | default(8443) }}" -target_ip: "{{ hostvars[groups['masters.' 
+ stack_name|quote][0]].private_v4 }}" - -openstack_private_network: private diff --git a/roles/static_inventory/meta/main.yml b/roles/static_inventory/meta/main.yml deleted file mode 100644 index fdda41bb3..000000000 --- a/roles/static_inventory/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - role: common diff --git a/roles/static_inventory/tasks/checkpoint.yml b/roles/static_inventory/tasks/checkpoint.yml deleted file mode 100644 index c0365bd3d..000000000 --- a/roles/static_inventory/tasks/checkpoint.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: check for static inventory dir - stat: - path: "{{ inventory_path }}" - register: stat_inventory_path - -- name: create static inventory dir - file: - path: "{{ inventory_path }}" - state: directory - mode: 0750 - when: not stat_inventory_path.stat.exists - -- name: create inventory from template - template: - src: inventory.j2 - dest: "{{ inventory_path }}/hosts" diff --git a/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml b/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml deleted file mode 100644 index 826efe78d..000000000 --- a/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Add all new app nodes to new_app_nodes - when: - - 'oc_old_app_nodes is defined' - - 'oc_old_app_nodes | list' - - 'node.name not in oc_old_app_nodes' - - 'node["metadata"]["sub-host-type"] == "app"' - register: result - set_fact: - new_app_nodes: '{{ new_app_nodes }} + [ {{ node }} ]' - -- name: If the node was added to new_nodes, remove it from registered nodes - set_fact: - registered_nodes: '{{ registered_nodes | difference([ node ]) }}' - when: 'not result | skipped' diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml deleted file mode 100644 index 3dab62df2..000000000 --- a/roles/static_inventory/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Remove any existing inventory - file: - path: "{{ inventory_path }}/hosts" - state: absent - -- name: Refresh the inventory - meta: refresh_inventory - -- name: Generate in-memory inventory - include: openstack.yml - -- name: Checkpoint in-memory data into a static inventory - include: checkpoint.yml - -- name: Generate SSH config for accessing hosts via bastion - include: sshconfig.yml - when: use_bastion|bool - -- name: Configure SSH tunneling to access UI - include: sshtun.yml - become: true - when: - - use_bastion|bool - - ui_ssh_tunnel|bool diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml deleted file mode 100644 index adf78c966..000000000 --- a/roles/static_inventory/tasks/openstack.yml +++ /dev/null @@ -1,120 +0,0 @@ ---- -- no_log: true - block: - - name: fetch all nodes from openstack shade dynamic inventory - command: shade-inventory --list - register: registered_nodes_output - when: refresh_inventory|bool - - - name: set fact for openstack inventory cluster nodes - set_fact: - registered_nodes: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}" - vars: - q: "[] | [?metadata.clusterid=='{{stack_name}}']" - when: - - refresh_inventory|bool - - - name: set_fact for openstack inventory nodes - set_fact: - registered_bastion_nodes: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}" - registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q2) }}" - vars: - q: "[] | [?metadata.group=='infra.{{stack_name}}']" - q2: "[] | 
[?metadata.clusterid=='{{stack_name}}'] | [?public_v4!='']" - when: - - refresh_inventory|bool - - - name: set_fact for openstack inventory nodes with provider network - set_fact: - registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}" - vars: - q: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4=='']" - when: - - refresh_inventory|bool - - openstack_provider_network_name|default(None) - - - name: Add cluster nodes w/o floating IPs to inventory - with_items: "{{ registered_nodes|difference(registered_nodes_floating) }}" - add_host: - name: '{{ item.name }}' - ansible_host: >- - {% if use_bastion|bool -%} - {{ item.name }} - {%- else -%} - {%- set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} - {{ node[0].addresses[openstack_private_network|quote][0].addr }} - {%- endif %} - ansible_fqdn: '{{ item.name }}' - ansible_user: '{{ ssh_user }}' - ansible_private_key_file: '{{ private_ssh_key }}' - ansible_ssh_extra_args: '-F {{ ssh_config_path }}' - private_v4: >- - {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} - {{ node[0].addresses[openstack_private_network|quote][0].addr }} - - - name: Add cluster nodes with floating IPs to inventory - with_items: "{{ registered_nodes_floating }}" - add_host: - name: '{{ item.name }}' - ansible_host: >- - {% if use_bastion|bool -%} - {{ item.name }} - {%- elif openstack_provider_network_name|default(None) -%} - {{ item.private_v4 }} - {%- else -%} - {{ item.public_v4 }} - {%- endif %} - ansible_fqdn: '{{ item.name }}' - ansible_user: '{{ ssh_user }}' - ansible_private_key_file: '{{ private_ssh_key }}' - ansible_ssh_extra_args: '-F {{ ssh_config_path }}' - private_v4: >- - {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%} - {{ node[0].addresses[openstack_private_network|quote][0].addr }} - public_v4: >- - {% if openstack_provider_network_name|default(None) -%} - {{ item.private_v4 }} - {%- else -%} - {{ item.public_v4 }} - {%- endif %} - - # Split registered_nodes into old nodes and new app nodes - # Add new app nodes to new_nodes host group for upscaling - - name: Create new_app_nodes variable - set_fact: - new_app_nodes: [] - - - name: Filter new app nodes out of registered_nodes - include: filter_out_new_app_nodes.yaml - with_items: "{{ registered_nodes }}" - loop_control: - loop_var: node - - - name: Add new app nodes to the new_nodes section (if a deployment already exists) - with_items: "{{ new_app_nodes }}" - add_host: - name: "{{ item.name }}" - groups: new_nodes, app - - - name: Add the rest of cluster nodes to their corresponding groups - with_items: "{{ registered_nodes }}" - add_host: - name: '{{ item.name }}' - groups: '{{ item.metadata.group }}' - - - name: Add bastion node to inventory - add_host: - name: bastion - groups: bastions - ansible_host: '{{ registered_bastion_nodes[0].public_v4 }}' - ansible_fqdn: '{{ registered_bastion_nodes[0].name }}' - ansible_user: '{{ ssh_user }}' - ansible_private_key_file: '{{ private_ssh_key }}' - ansible_ssh_extra_args: '-F {{ ssh_config_path }}' - private_v4: >- - {% set node = registered_nodes | json_query("[?name=='" + registered_bastion_nodes[0].name + "']") -%} - {{ node[0].addresses[openstack_private_network|quote][0].addr }} - public_v4: '{{ registered_bastion_nodes[0].public_v4 }}' - when: - - registered_bastion_nodes is defined - - use_bastion|bool diff --git a/roles/static_inventory/tasks/sshconfig.yml b/roles/static_inventory/tasks/sshconfig.yml deleted 
file mode 100644 index 7119fe6ff..000000000 --- a/roles/static_inventory/tasks/sshconfig.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: set ssh proxy command prefix for accessing nodes via bastion - set_fact: - ssh_proxy_command: >- - ssh {{ ssh_options }} - -i {{ private_ssh_key }} - {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }} - -- name: regenerate ssh config - template: - src: openstack_ssh_config.j2 - dest: "{{ ssh_config_path }}" - mode: 0644 diff --git a/roles/static_inventory/tasks/sshtun.yml b/roles/static_inventory/tasks/sshtun.yml deleted file mode 100644 index b0e4c832c..000000000 --- a/roles/static_inventory/tasks/sshtun.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Create ssh tunnel systemd service - template: - src: ssh-tunnel.service.j2 - dest: /etc/systemd/system/ssh-tunnel.service - mode: 0644 - -- name: reload the systemctl daemon after file update - command: systemctl daemon-reload - -- name: Enable ssh tunnel service - service: - name: ssh-tunnel - enabled: true - state: restarted diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2 deleted file mode 100644 index 9dfbe3a5b..000000000 --- a/roles/static_inventory/templates/inventory.j2 +++ /dev/null @@ -1,104 +0,0 @@ -# BEGIN Autogenerated hosts -{% for host in groups['all'] %} -{% if hostvars[host].get('ansible_connection', '') == 'local' %} -{{ host }} ansible_connection=local -{% else %} - -{{ host }}{% if 'ansible_host' in hostvars[host] -%} ansible_host={{ hostvars[host]['ansible_host'] }}{% endif %} -{% if 'private_v4' in hostvars[host] -%} private_v4={{ hostvars[host]['private_v4'] }}{% endif %} -{% if 'public_v4' in hostvars[host] -%} public_v4={{ hostvars[host]['public_v4'] }}{% endif %} -{% if 'ansible_user' in hostvars[host] -%} ansible_user={{ hostvars[host]['ansible_user'] }}{% endif %} -{% if 'ansible_private_key_file' in hostvars[host] and hostvars[host]['ansible_private_key_file'] -%} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %} -{% if use_bastion|bool and 'ansible_ssh_extra_args' in hostvars[host] -%} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} openshift_hostname={{ host }} - -{% endif %} -{% endfor %} -# END autogenerated hosts - -#[all:vars] -# For all group_vars, see ./group_vars/all.yml -[infra_hosts:vars] -openshift_node_labels={{ openshift_cluster_node_labels.infra | to_json | quote }} - -[app:vars] -openshift_node_labels={{ openshift_cluster_node_labels.app | to_json | quote }} - -# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups. -# The lb group lets Ansible configure HAProxy as the load balancing solution. -# Comment lb out if your load balancer is pre-configured. 
-[cluster_hosts:children] -OSEv3 -dns - -[OSEv3:children] -nodes -etcd -lb -new_nodes - -# Set variables common for all OSEv3 hosts -[OSEv3:vars] - -# For OSEv3 normal group vars, see ./group_vars/OSEv3.yml - -{% if cinder_registry_volume is defined and 'volume' in cinder_registry_volume %} -openshift_hosted_registry_storage_openstack_volumeID="{{ cinder_registry_volume.id }}" -openshift_hosted_registry_storage_volume_size="{{ cinder_registry_volume.volume.size }}Gi" -{% endif %} - - -# Host Groups - -[masters:children] -masters.{{ stack_name }} - -[etcd:children] -etcd.{{ stack_name }} -{% if 'etcd' not in groups or groups['etcd']|length == 0 %}masters.{{ stack_name }}{% endif %} - -[nodes:children] -masters -infra.{{ stack_name }} -nodes.{{ stack_name }} - -[infra_hosts:children] -infra.{{ stack_name }} - -[app:children] -nodes.{{ stack_name }} - -[dns:children] -dns.{{ stack_name }} - -[lb:children] -lb.{{ stack_name }} - -[new_nodes:children] - -# Empty placeholders for all groups of the cluster nodes -[masters.{{ stack_name }}] -[etcd.{{ stack_name }}] -[infra.{{ stack_name }}] -[nodes.{{ stack_name }}] -[app.{{ stack_name }}] -[dns.{{ stack_name }}] -[lb.{{ stack_name }}] -[new_nodes.{{ stack_name }}] - -# BEGIN Autogenerated groups -{% for group in groups %} -{% if group not in ['ungrouped', 'all'] %} -[{{ group }}] -{% for host in groups[group] %} -{{ host }} -{% endfor %} - -{% endif %} -{% endfor %} -# END Autogenerated groups diff --git a/roles/static_inventory/templates/openstack_ssh_config.j2 b/roles/static_inventory/templates/openstack_ssh_config.j2 deleted file mode 100644 index ad5d1253a..000000000 --- a/roles/static_inventory/templates/openstack_ssh_config.j2 +++ /dev/null @@ -1,21 +0,0 @@ -Host * - IdentitiesOnly yes - -Host bastion - Hostname {{ hostvars['bastion'].ansible_host }} - IdentityFile {{ hostvars['bastion'].ansible_private_key_file }} - User {{ ssh_user }} - StrictHostKeyChecking no - UserKnownHostsFile=/dev/null - -{% for host in groups['all'] | difference(groups['bastions'][0]) %} - -Host {{ host }} - Hostname {{ hostvars[host].ansible_host }} - ProxyCommand {{ ssh_proxy_command }} -W {{ hostvars[host].private_v4 }}:22 - IdentityFile {{ hostvars[host].ansible_private_key_file }} - User {{ ssh_user }} - StrictHostKeyChecking no - UserKnownHostsFile=/dev/null - -{% endfor %} diff --git a/roles/static_inventory/templates/ssh-tunnel.service.j2 b/roles/static_inventory/templates/ssh-tunnel.service.j2 deleted file mode 100644 index 0d1cf8f79..000000000 --- a/roles/static_inventory/templates/ssh-tunnel.service.j2 +++ /dev/null @@ -1,20 +0,0 @@ -[Unit] -Description=Set up ssh tunneling for OpenShift cluster UI -After=network.target - -[Service] -ExecStart=/usr/bin/ssh -NT -o \ - ServerAliveInterval=60 -o \ - UserKnownHostsFile=/dev/null -o \ - StrictHostKeyChecking=no -o \ - ExitOnForwardFailure=no -i \ - {{ private_ssh_key }} {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }} \ - -L 0.0.0.0:{{ ui_port }}:{{ target_ip }}:{{ ui_port }} - - -# Restart every >2 seconds to avoid StartLimitInterval failure -RestartSec=5 -Restart=always - -[Install] -WantedBy=multi-user.target -- cgit v1.2.3 From b1e4629ae3e86c59503ac29a781a62a8e75c14f2 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 27 Oct 2017 17:29:39 +0200 Subject: Remove the openstack custom-actions for now They're duplicating a lot of functionality that's already in openshift-ansible and they're not actually used from the provisioning playbooks. We'll revisit them later. 
--- playbooks/openstack/custom-actions/add-cas.yml | 13 ---- .../custom-actions/add-docker-registry.yml | 90 ---------------------- .../openstack/custom-actions/add-rhn-pools.yml | 13 ---- .../openstack/custom-actions/add-yum-repos.yml | 12 --- 4 files changed, 128 deletions(-) delete mode 100644 playbooks/openstack/custom-actions/add-cas.yml delete mode 100644 playbooks/openstack/custom-actions/add-docker-registry.yml delete mode 100644 playbooks/openstack/custom-actions/add-rhn-pools.yml delete mode 100644 playbooks/openstack/custom-actions/add-yum-repos.yml diff --git a/playbooks/openstack/custom-actions/add-cas.yml b/playbooks/openstack/custom-actions/add-cas.yml deleted file mode 100644 index b2c195f91..000000000 --- a/playbooks/openstack/custom-actions/add-cas.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- hosts: cluster_hosts - become: true - vars: - ca_files: [] - tasks: - - name: Copy CAs to the trusted CAs location - with_items: "{{ ca_files }}" - copy: - src: "{{ item }}" - dest: /etc/pki/ca-trust/source/anchors/ - - name: Update trusted CAs - shell: 'update-ca-trust enable && update-ca-trust extract' diff --git a/playbooks/openstack/custom-actions/add-docker-registry.yml b/playbooks/openstack/custom-actions/add-docker-registry.yml deleted file mode 100644 index e118a71dc..000000000 --- a/playbooks/openstack/custom-actions/add-docker-registry.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -- hosts: OSEv3 - become: true - vars: - registries: [] - insecure_registries: [] - - tasks: - - name: Check if docker is even installed - command: docker - - - name: Install atomic-registries package - yum: - name: atomic-registries - state: latest - - - name: Get registry configuration file - register: file_result - stat: - path: /etc/containers/registries.conf - - - name: Check if it exists - assert: - that: 'file_result.stat.exists' - msg: "Configuration file does not exist." 
- - - name: Load configuration file - shell: cat /etc/containers/registries.conf - register: file_content - - - name: Store file content into a variable - set_fact: - docker_conf: "{{ file_content.stdout | from_yaml }}" - - - name: Make sure that docker file content is a dictionary - when: '(docker_conf is string) and (not docker_conf)' - set_fact: - docker_conf: {} - - - name: Make sure that registries is a list - when: 'registries is string' - set_fact: - registries_list: [ "{{ registries }}" ] - - - name: Make sure that insecure_registries is a list - when: 'insecure_registries is string' - set_fact: - insecure_registries_list: [ "{{ insecure_registries }}" ] - - - name: Set default values if there are no registries defined - set_fact: - docker_conf_registries: "{{ [] if docker_conf['registries'] is not defined else docker_conf['registries'] }}" - docker_conf_insecure_registries: "{{ [] if docker_conf['insecure_registries'] is not defined else docker_conf['insecure_registries'] }}" - - - name: Add other registries - when: 'registries_list is not defined' - register: registries_merge_result - set_fact: - docker_conf: "{{ docker_conf | combine({'registries': (docker_conf_registries + registries) | unique}, recursive=True) }}" - - - name: Add other registries (if registries had to be converted) - when: 'registries_merge_result|skipped' - set_fact: - docker_conf: "{{ docker_conf | combine({'registries': (docker_conf_registries + registries_list) | unique}, recursive=True) }}" - - - name: Add insecure registries - when: 'insecure_registries_list is not defined' - register: insecure_registries_merge_result - set_fact: - docker_conf: "{{ docker_conf | combine({'insecure_registries': (docker_conf_insecure_registries + insecure_registries) | unique }, recursive=True) }}" - - - name: Add insecure registries (if insecure_registries had to be converted) - when: 'insecure_registries_merge_result|skipped' - set_fact: - docker_conf: "{{ docker_conf | combine({'insecure_registries': (docker_conf_insecure_registries + insecure_registries_list) | unique }, recursive=True) }}" - - - name: Load variable back to file - copy: - content: "{{ docker_conf | to_yaml }}" - dest: /etc/containers/registries.conf - - - name: Restart registries service - service: - name: registries - state: restarted - - - name: Restart docker - service: - name: docker - state: restarted diff --git a/playbooks/openstack/custom-actions/add-rhn-pools.yml b/playbooks/openstack/custom-actions/add-rhn-pools.yml deleted file mode 100644 index d17c1e335..000000000 --- a/playbooks/openstack/custom-actions/add-rhn-pools.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- hosts: cluster_hosts - vars: - rhn_pools: [] - tasks: - - name: Attach additional RHN pools - become: true - with_items: "{{ rhn_pools }}" - command: "/usr/bin/subscription-manager attach --pool={{ item }}" - register: attach_rhn_pools_result - until: attach_rhn_pools_result.rc == 0 - retries: 10 - delay: 1 diff --git a/playbooks/openstack/custom-actions/add-yum-repos.yml b/playbooks/openstack/custom-actions/add-yum-repos.yml deleted file mode 100644 index ffebcb642..000000000 --- a/playbooks/openstack/custom-actions/add-yum-repos.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- hosts: cluster_hosts - vars: - yum_repos: [] - tasks: - # enable additional yum repos - - name: Add repository - yum_repository: - name: "{{ item.name }}" - description: "{{ item.description }}" - baseurl: "{{ item.baseurl }}" - with_items: "{{ yum_repos }}" -- cgit v1.2.3 From 
94413931c26e47fd9acd3c0d20bbcfd1704755d1 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 27 Oct 2017 17:59:44 +0200 Subject: Remove the post-install and scale-up playbooks They're not necessary for the initial PR so let's add them properly later. --- .../openstack/openshift-cluster/post-install.yml | 57 ------------------ .../openstack/openshift-cluster/scale-up.yaml | 70 ---------------------- 2 files changed, 127 deletions(-) delete mode 100644 playbooks/openstack/openshift-cluster/post-install.yml delete mode 100644 playbooks/openstack/openshift-cluster/scale-up.yaml diff --git a/playbooks/openstack/openshift-cluster/post-install.yml b/playbooks/openstack/openshift-cluster/post-install.yml deleted file mode 100644 index 7b1744a18..000000000 --- a/playbooks/openstack/openshift-cluster/post-install.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- hosts: OSEv3 - gather_facts: False - become: True - tasks: - - name: Save iptables rules to a backup file - when: openshift_use_flannel|default(False)|bool - shell: iptables-save > /etc/sysconfig/iptables.orig-$(date +%Y%m%d%H%M%S) - -# Enable iptables service on app nodes to persist custom rules (flannel SDN) -# FIXME(bogdando) w/a https://bugzilla.redhat.com/show_bug.cgi?id=1490820 -- hosts: app - gather_facts: False - become: True - vars: - os_firewall_allow: - - service: dnsmasq tcp - port: 53/tcp - - service: dnsmasq udp - port: 53/udp - tasks: - - when: openshift_use_flannel|default(False)|bool - block: - - include_role: - name: os_firewall - - include_role: - name: lib_os_firewall - - name: set allow rules for dnsmasq - os_firewall_manage_iptables: - name: "{{ item.service }}" - action: add - protocol: "{{ item.port.split('/')[1] }}" - port: "{{ item.port.split('/')[0] }}" - with_items: "{{ os_firewall_allow }}" - -- hosts: OSEv3 - gather_facts: False - become: True - tasks: - - name: Apply post-install iptables hacks for Flannel SDN (the best effort) - when: openshift_use_flannel|default(False)|bool - block: - - name: set allow/masquerade rules for for flannel/docker - shell: >- - (iptables-save | grep -q custom-flannel-docker-1) || - iptables -A DOCKER -w - -p all -j ACCEPT - -m comment --comment "custom-flannel-docker-1"; - (iptables-save | grep -q custom-flannel-docker-2) || - iptables -t nat -A POSTROUTING -w - -o {{flannel_interface|default('eth1')}} - -m comment --comment "custom-flannel-docker-2" - -j MASQUERADE - - # NOTE(bogdando) the rules will not be restored, when iptables service unit is disabled & masked - - name: Persist in-memory iptables rules (w/o dynamic KUBE rules) - shell: iptables-save | grep -v KUBE > /etc/sysconfig/iptables diff --git a/playbooks/openstack/openshift-cluster/scale-up.yaml b/playbooks/openstack/openshift-cluster/scale-up.yaml deleted file mode 100644 index f99ff1349..000000000 --- a/playbooks/openstack/openshift-cluster/scale-up.yaml +++ /dev/null @@ -1,70 +0,0 @@ ---- -# Get the needed information about the current deployment -- hosts: masters[0] - tasks: - - name: Get number of app nodes - shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l - register: oc_old_num_nodes - - name: Get names of app nodes - shell: oc get nodes -l autoscaling=app --no-headers=true | cut -f1 -d " " - register: oc_old_app_nodes - -- hosts: localhost - tasks: - # Since both number and names of app nodes are to be removed - # localhost variables for these values need to be set - - name: Store old number and names of app nodes locally (if there is an existing deployment) - when: '"masters" in groups' - 
register: set_fact_result - set_fact: - oc_old_num_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_num_nodes'].stdout }}" - oc_old_app_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_app_nodes'].stdout_lines }}" - - - name: Set default values for old app nodes (if there is no existing deployment) - when: 'set_fact_result | skipped' - set_fact: - oc_old_num_nodes: 0 - oc_old_app_nodes: [] - - # Set how many nodes are to be added (1 by default) - - name: Set how many nodes are to be added - set_fact: - increment_by: 1 - - name: Check that the number corresponds to scaling up (not down) - assert: - that: 'increment_by | int >= 1' - msg: > - FAIL: The value of increment_by must be at least 1 - (but it is {{ increment_by | int }}). - - name: Update openstack_num_nodes variable - set_fact: - openstack_num_nodes: "{{ oc_old_num_nodes | int + increment_by | int }}" - -# Run provision.yaml with higher number of nodes to create a new app-node VM -- include: provision.yml - -# Run config.yml to perform openshift installation - -# Creating a new deployment by the full installation -- include: install.yml - when: 'not groups["new_nodes"] | list' - -# Scaling up existing deployment -- include: "../../byo/openshift-node/scaleup.yml" - vars: - openshift_ansible_dir: ../../../../openshift-ansible - when: 'groups["new_nodes"] | list' - -# Post-verification: Verify new number of nodes -- hosts: masters[0] - tasks: - - name: Get number of nodes - shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l - register: oc_new_num_nodes - - name: Check that the actual result matches the defined value - assert: - that: 'oc_new_num_nodes.stdout | int == (hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int)' - msg: > - FAIL: Number of application nodes has not been increased accordingly - (it should be {{ hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int }} - but it is {{ oc_new_num_nodes.stdout | int }}). -- cgit v1.2.3 From eb1f8107bb5b76cec7004f9a1ea7effab5aa0516 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Fri, 27 Oct 2017 18:00:50 +0200 Subject: Use correct host group in provision.yml --- playbooks/openstack/openshift-cluster/provision.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml index 5b20d5720..ed44d4a32 100644 --- a/playbooks/openstack/openshift-cluster/provision.yml +++ b/playbooks/openstack/openshift-cluster/provision.yml @@ -14,7 +14,7 @@ # and configure their DNS if they have to. - name: Prepare the Nodes in the cluster for installation - hosts: cluster_hosts + hosts: oo_all_hosts become: true # NOTE: The nodes may not be up yet, don't gather facts here. # They'll be collected after `wait_for_connection`. -- cgit v1.2.3 From fabf16250b3947a04fc3b3bcb9b6fc7c1265651b Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Mon, 30 Oct 2017 17:53:02 +0100 Subject: Add a stub of the dns record update code in This will mostly not work but it's a starting point. 
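
The populate-dns tasks added below read the target nameserver and TSIG key details from the `external_nsupdate_keys` dictionary that this patch introduces into the role defaults as an empty dict. A minimal sketch of how a deployer might populate it in their inventory follows; every value is a placeholder for illustration (none of them come from this repository), and the key names mirror what the tasks below actually look up.

```
# group_vars/all.yml -- illustrative sketch only; all values are placeholders
external_nsupdate_keys:
  private:
    server: "192.168.23.2"                      # nsupdate-capable DNS server for the private view
    key_name: "private-openshift.example.com"   # optional; the tasks default it to 'private-' + full_dns_domain
    key_secret: "<TSIG key secret>"
    key_algorithm: "hmac-md5"                   # lower-cased by the tasks before use
  # An analogous 'public' entry with the same keys is referenced only by the
  # commented-out public-record tasks in this stub.
```
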
--- roles/openshift_openstack/defaults/main.yml | 6 + roles/openshift_openstack/tasks/populate-dns.yml | 167 +++++++++++++++++++++++ 2 files changed, 173 insertions(+) diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 05f1c0911..19e6e6f51 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -47,3 +47,9 @@ docker_data_size: "95%VG" docker_dm_basesize: "3G" container_root_lv_name: "dockerlv" container_root_lv_mount_path: "/var/lib/docker" + + +# populate-dns +dns_records_rm: [] +dns_records_add: [] +external_nsupdate_keys: {} diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index f1a868a19..c8243dc1f 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -3,3 +3,170 @@ # this is an optional step -- the deployers may do whatever else they # wish here. + + +# TODO: build records +# TODO: run nsupdate + + +- name: "Generate list of private A records" + set_fact: + private_records: "{{ [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + +# - name: "Add wildcard records to the private A records for infrahosts" +# set_fact: +# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" +# with_items: "{{ groups['infra_hosts'] }}" + +# - name: "Add public master cluster hostname records to the private A records (single master)" +# set_fact: +# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" +# when: +# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined +# - openstack_num_masters == 1 + +# - name: "Add public master cluster hostname records to the private A records (multi-master)" +# set_fact: +# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" +# when: +# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined +# - openstack_num_masters > 1 + +- name: "Set the private DNS server to use the external value (if provided)" + set_fact: + nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" + nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" + nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" + nsupdate_private_key_name: "{{ external_nsupdate_keys['private']['key_name']|default('private-' + full_dns_domain) }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['private'] is defined + + +- name: "Generate the private Add section for DNS" + set_fact: + private_named_records: + - view: "private" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_private }}" + key_name: "{{ nsupdate_private_key_name|default('private-' + full_dns_domain) }}" + key_secret: "{{ nsupdate_key_secret_private }}" + key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" + entries: "{{ private_records }}" + +# - name: "Generate 
list of public A records" +# set_fact: +# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" +# with_items: "{{ groups['cluster_hosts'] }}" +# when: hostvars[item]['public_v4'] is defined + +# - name: "Add wildcard records to the public A records" +# set_fact: +# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" +# with_items: "{{ groups['infra_hosts'] }}" +# when: hostvars[item]['public_v4'] is defined + +# - name: "Add public master cluster hostname records to the public A records (single master)" +# set_fact: +# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" +# when: +# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined +# - openstack_num_masters == 1 +# - not use_bastion|bool + +# - name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" +# set_fact: +# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" +# when: +# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined +# - openstack_num_masters == 1 +# - use_bastion|bool + +# - name: "Add public master cluster hostname records to the public A records (multi-master)" +# set_fact: +# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" +# when: +# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined +# - openstack_num_masters > 1 + +# - name: "Set the public DNS server details to use the external value (if provided)" +# set_fact: +# nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" +# nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" +# nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" +# nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" +# when: +# - external_nsupdate_keys is defined +# - external_nsupdate_keys['public'] is defined + +# - name: "Set the public DNS server details to use the provisioned value" +# set_fact: +# nsupdate_server_public: "{{ hostvars[groups['dns'][0]].public_v4 }}" +# nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" +# nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" +# when: +# - nsupdate_server_public is undefined + +# - name: "Generate the public Add section for DNS" +# set_fact: +# public_named_records: +# - view: "public" +# zone: "{{ full_dns_domain }}" +# server: "{{ nsupdate_server_public }}" +# key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" +# key_secret: "{{ nsupdate_key_secret_public }}" +# key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" +# entries: "{{ 
public_records }}" + + + + + + +- name: "Generate the final dns_records_add" + set_fact: + # TODO(shadower): enable this when we add public records + #dns_records_add: "{{ private_named_records + public_named_records }}" + dns_records_add: "{{ private_named_records }}" + + + +# RUN NSUPDATE + +- name: "Remove any deleted DNS A records" + nsupdate: + key_name: "{{ item.0.key_name }}" + key_secret: "{{ item.0.key_secret }}" + key_algorithm: "{{ item.0.key_algorithm }}" + server: "{{ item.0.server }}" + zone: "{{ item.0.zone }}" + record: "{{ item.1.hostname }}" + type: "{{ item.1.type }}" + state: absent + with_subelements: + - "{{ dns_records_rm | default({}) }}" + - entries + register: nsupdate_remove_result + until: nsupdate_remove_result|succeeded + retries: 10 + delay: 1 + +- name: "Add DNS A records" + nsupdate: + key_name: "{{ item.0.key_name }}" + key_secret: "{{ item.0.key_secret }}" + key_algorithm: "{{ item.0.key_algorithm }}" + server: "{{ item.0.server }}" + zone: "{{ item.0.zone }}" + record: "{{ item.1.hostname }}" + value: "{{ item.1.ip }}" + type: "{{ item.1.type }}" + state: present + with_subelements: + - "{{ dns_records_add | default({}) }}" + - entries + register: nsupdate_add_result + until: nsupdate_add_result|succeeded + retries: 10 + delay: 1 -- cgit v1.2.3 From 84259a3ed4ac741ee782f57884ba36729e277eae Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 1 Nov 2017 11:14:10 +0100 Subject: Remove the subscription-manager role The repo already contains the `rhel_subscribe` role so we should use that instead. --- roles/subscription-manager/README.md | 156 --------------------- roles/subscription-manager/pre_tasks/pre_tasks.yml | 45 ------ roles/subscription-manager/tasks/main.yml | 150 -------------------- 3 files changed, 351 deletions(-) delete mode 100644 roles/subscription-manager/README.md delete mode 100644 roles/subscription-manager/pre_tasks/pre_tasks.yml delete mode 100644 roles/subscription-manager/tasks/main.yml diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md deleted file mode 100644 index 748de282c..000000000 --- a/roles/subscription-manager/README.md +++ /dev/null @@ -1,156 +0,0 @@ -# Red Hat Subscription Manager Ansible Role - -## Parameters - -This role depends on user specified variables. These can be set in the inventory file, group_vars or passed to the playbook from the CLI. No values are set by default which disables this role. The variables are: - -### rhsm_satellite - -Subscription Manager server hostname. If using a Satellite server set the FQDN here. If using RHSM Hosted this value must be left blank, none or false. - -Default: none - -### rhsm_username - -Subscription Manager username. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. - -Default: none - -### rhsm_password - -Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this. - -NOTE: If this variable is specified on the command-line or set in a variable file it may leave your password exposed. For this reason you may perfer to use an Activation Key if using Satellite. For RHSM Hosted, your password must be specified. There are two ways to provide the password to the Ansible playbook without exposing it to prying eyes. - -1. The first method is to use a **vars_prompt** to collect the password up front one time for the playbook. 
Ansible will not display the password if the prompt is configured as **private** and the task will not display the password on the CLI. This is the a good method as it supports automating the task to every host with only one password entry. To enable **vars_prompt** add the following to the very top of your playbook after the **hosts** declaration and before any **pre_tasks** section: - - ``` - - hosts: localhost - # Add the following lines after a -hosts: declaration and before pre_tasks: - # Start of vars_prompt code block - vars_prompt: - - name: "rhsm_password" - prompt: "Subscription Manager password" - confirm: yes - private: yes - # End of vars_prompt code block - pre_tasks: - ``` - -2. A second method is to use an encrypted file via **ansible-vault**. This does does not require modifying any code as the previous method, but does require more work to create and encrypt the file. To accomplish this, first create a file containing at least the **rhsm_password** variable (it is also possible to specify additional variables to encrypt them all as well): - 1. Create a file to contain the variable such as **secrets.yml**: - - ``` - --- - rhsm_password: "my_secret_password" - # other variables can optionally be placed here as well - ``` - - 2. Encrypt the file with **ansible-vault**: - - ``` - $ ansible-vault encrypt secrets.yml - Vault password: - Confirm Vault password: - Encryption successful - ``` - - 3. When executing **ansible-playbook** specify **--ask-vault-pass** to be prompted for the decryption password, and also specify the location of the **secrets.yml** as such: - - ``` - $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" - ``` - - NOTE: Optionally the file containing the encrypted variables can be decrypted with **ansible-vault** and the **--ask-vault-pass** option omitted to prevent any password prompting (for automated runs) and the file can be encrypted after the run. This can be used if an external system such as Jenkins would handle the decryption/encryption outside of Ansible. - -Default: none - -### rhsm_org - -Optional Subscription Manager Satellite Organization. Required for Satellite, ignored if using RHSM Hosted. - -Default: none - -### rhsm_activationkey - -Optional Subscription Manager Satellite Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead. - -Default: none - -### rhsm_pool - -Optional Subscription Manager pool, determine this by running **subscription-manager list --available** on a registered system. Valid for RHSM Hosted or Satellite. Specifying **rhsm_activationkey** will ignore this option. - -Default: none - -### rhsm_repos - -Optional list of repositories to enable. If left blank it is expected that the **rhsm_activationkey** will specify repos instead. If populated, a **subscription-manager repos --disable=\*** will be run and each of the specified repos explicitly enabled. Valid for RHSM Hosted or Satellite - -NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such: - -rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server-extras-rpms"]' - -Default: none - -## Calling This Role -Calling this role is done at both **pre_tasks** and **roles** sections of a playbook and optionally a **vars_prompt**. 
- -### vars_prompt -Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. It also cannot be shown conditionally. For this reason it is not included in this role by default. A better method may be using a file containing the password variable encrypted with **ansible-vault**. See the **rhsm_password** section for more details. - -To Add a prompt to capture **rhsm_password**: - -``` -- hosts: localhost - # Add the following lines after a -hosts: declaration and before pre_tasks: - # Start of vars_prompt code block - vars_prompt: - - name: "rhsm_password" - prompt: "Subscription Manager password" - confirm: yes - private: yes - # End of vars_prompt code block - pre_tasks: -``` - -### pre-tasks - -A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles: - -``` -pre_tasks: -- include: roles/subscription-manager/pre_tasks/pre_tasks.yml -``` - -### roles - -The bulk of the work is performed in the main.yml for this role. The pre-task play will set a variable which can be checked to contitionally include this role as such: - -``` -roles: - - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' } -``` - -## Running Playbooks with this Role - -- To register to RHSM Hosted or Satellite with a username and plain text password (NOTE: This may retain your password in your CLI history): - - ``` - $ ansible-playbook --extra-vars="rhsm_username=vvaldez rhsm_password=my_secret_password " - ``` - -- To register to RHSM Hosted or Satellite with username and an encrypted file containing the password: - - ``` - $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" - - ``` - -- To register to a Satellite server with an activation key: - - ``` - $ ansible-playbook --extra-vars="rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1 " - - ``` -- To ignore any Subscription Manager activities, simply do not set any parameters. 
diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml deleted file mode 100644 index 464670fc0..000000000 --- a/roles/subscription-manager/pre_tasks/pre_tasks.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- name: "Set password fact" - set_fact: - rhsm_password: "{{ rhsm_password | default(None) }}" - no_log: true - -- name: "Initialize Subscription Manager fact" - set_fact: - rhsm_register: true - -- name: "Determine if Subscription Manager should be used" - set_fact: - rhsm_register: false - when: - - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' - - rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '' - - rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '' - - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' - - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - -- name: "Validate Subscription Manager organization is set" - fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'" - when: - - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == '' - - rhsm_satellite is defined - - rhsm_satellite is not none - - rhsm_satellite|trim != '' - - rhsm_register - -- name: "Validate Subscription Manager authentication is defined" - fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set. See the README.md for details on securely prompting for a password" - when: - - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '') - - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == '' - - rhsm_register - -- name: "Validate activation key and Hosted are not requested together" - fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'" - when: - - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == '' - - rhsm_activationkey is defined - - rhsm_activationkey is not none - - rhsm_activationkey|trim != '' - - rhsm_register diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml deleted file mode 100644 index e4c9fdffb..000000000 --- a/roles/subscription-manager/tasks/main.yml +++ /dev/null @@ -1,150 +0,0 @@ ---- -- name: "Initialize rhsm_password variable if vars_prompt was used" - set_fact: - rhsm_password: "{{ hostvars.localhost.rhsm_password }}" - when: - - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == '' - -- name: "Initializing Subscription Manager authentication method" - set_fact: - rhsm_authentication: false - -# 'rhsm_activationkey' will take precedence even if 'rhsm_username' and 'rhsm_password' are also set -- name: "Setting Subscription Manager Activation Key Fact" - set_fact: - rhsm_authentication: "key" - when: - - rhsm_activationkey is defined - - rhsm_activationkey is not none - - rhsm_activationkey|trim != '' - - not rhsm_authentication - -# If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password -- name: "Setting Subscription Manager Username and Password Fact" - set_fact: - rhsm_authentication: "password" - when: - - rhsm_username is defined - - rhsm_username is not none - - 
rhsm_username|trim != '' - - rhsm_password is defined - - rhsm_password is not none - - rhsm_password|trim != '' - - not rhsm_authentication - -- name: "Initializing registration status" - set_fact: - registered: false - -- name: "Checking subscription status (a failure means it is not registered and will be)" - command: "/usr/bin/subscription-manager status" - ignore_errors: yes - changed_when: no - register: check_if_registered - -- name: "Set registration fact if system is already registered" - set_fact: - registered: true - when: check_if_registered.rc == 0 - -- name: "Cleaning any old subscriptions" - command: "/usr/bin/subscription-manager clean" - when: - - not registered - - rhsm_authentication is defined - register: cleaningsubs_result - until: cleaningsubs_result.rc == 0 - retries: 10 - delay: 1 - -- name: "Install Satellite certificate" - command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm" - when: - - not registered - - rhsm_satellite is defined - - rhsm_satellite is not none - - rhsm_satellite|trim != '' - -- name: "Register to Satellite using activation key" - command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org='{{ rhsm_org }}'" - when: - - not registered - - rhsm_authentication == 'key' - - rhsm_satellite is defined - - rhsm_satellite is not none - - rhsm_satellite|trim != '' - register: register_key_result - until: register_key_result.rc == 0 - retries: 10 - delay: 1 - -# This can apply to either Hosted or Satellite -- name: "Register using username and password" - command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}" - no_log: true - when: - - not registered - - rhsm_authentication == "password" - - rhsm_org is not defined or rhsm_org is none or rhsm_org|trim == '' - register: register_userpw_result - until: register_userpw_result.rc == 0 - retries: 10 - delay: 1 - -# This can apply to either Hosted or Satellite -- name: "Register using username, password and organization" - command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }} --org={{ rhsm_org }}" - no_log: true - when: - - not registered - - rhsm_authentication == "password" - - rhsm_org is defined - - rhsm_org is not none - - rhsm_org|trim != '' - register: register_userpworg_result - until: register_userpworg_result.rc == 0 - retries: 10 - delay: 1 - -- name: "Auto-attach to Subscription Manager Pool" - command: "/usr/bin/subscription-manager attach --auto" - when: - - not registered - - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == '' - register: autoattach_result - until: autoattach_result.rc == 0 - retries: 10 - delay: 1 - -- name: "Attach to a specific pool" - command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}" - when: - - rhsm_pool is defined - - rhsm_pool is not none - - rhsm_pool|trim != '' - - not registered - register: attachpool_result - until: attachpool_result.rc == 0 - retries: 10 - delay: 1 - -- name: "Disable all repositories" - command: "/usr/bin/subscription-manager repos --disable=*" - when: - - not registered - - rhsm_repos is defined - - rhsm_repos is not none - - rhsm_repos|trim != '' - -- name: "Enable specified repositories" - command: "/usr/bin/subscription-manager repos --enable={{ item }}" - with_items: "{{ rhsm_repos }}" - when: - - not registered - - rhsm_repos is defined - - rhsm_repos is not none - - rhsm_repos|trim != '' - register: 
enablerepos_result - until: enablerepos_result.rc == 0 - retries: 10 - delay: 1 -- cgit v1.2.3 From 88907aca794716d1a2db4cc31e03375720695424 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 1 Nov 2017 11:33:38 +0100 Subject: Use the existing ansible.cfg file --- ansible.cfg | 2 ++ playbooks/openstack/README.md | 15 +++++++-------- playbooks/openstack/ansible.cfg | 24 ------------------------ 3 files changed, 9 insertions(+), 32 deletions(-) delete mode 100644 playbooks/openstack/ansible.cfg diff --git a/ansible.cfg b/ansible.cfg index 589a58e9d..f96bf871f 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -23,6 +23,8 @@ fact_caching = jsonfile fact_caching_connection = $HOME/ansible/facts fact_caching_timeout = 600 callback_whitelist = profile_tasks +inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt +timeout = 30 # work around privilege escalation timeouts in ansible # Uncomment to use the provided BYO inventory #hostfile = inventory/byo/hosts diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index 875004cc9..4347ddaa8 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -197,27 +197,26 @@ the [advanced configuration][advanced-configuration]. ### 3. Creating the OpenStack resources (VMs, networking, etc.) -We will install the DNS server roles using ansible galaxy and then run -the openstack provisioning playbook. The `ansible.cfg` file we provide -has useful defaults -- copy it to the directory you're going to run -Ansible from. +We provide an `ansible.cfg` file which has some useful defaults -- you should +copy it to the directory you're going to run `ansible-playbook` from. ```bash -$ ansible-galaxy install -r openshift-ansible/playbooks/openstack/galaxy-requirements.yaml -p openshift-ansible/roles -$ cp openshift-ansible/playbooks/openstack/ansible.cfg ansible.cfg +$ cp openshift-ansible/ansible.cfg ansible.cfg ``` -(you will only need to do this once) Then run the provisioning playbook -- this will create the OpenStack resources: ```bash -$ ansible-playbook -i inventory openshift-ansible/playbooks/openstack/openshift-cluster/provision.yaml +$ ansible-playbook --user openshift -i inventory openshift-ansible/playbooks/openstack/openshift-cluster/provision.yaml ``` If you're using multiple inventories, make sure you pass the path to the right one to `-i`. +If your SSH private key is not in `~/.ssh/id_rsa` use the `--private-key` +option to specify the correct path. + ### 4. 
Installing OpenShift diff --git a/playbooks/openstack/ansible.cfg b/playbooks/openstack/ansible.cfg deleted file mode 100644 index ae5669c35..000000000 --- a/playbooks/openstack/ansible.cfg +++ /dev/null @@ -1,24 +0,0 @@ -# config file for ansible -- http://ansible.com/ -# ============================================== -[defaults] -ansible_user = openshift -forks = 50 -# work around privilege escalation timeouts in ansible -timeout = 30 -host_key_checking = false -inventory = inventory -inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt -gathering = smart -retry_files_enabled = false -fact_caching = jsonfile -fact_caching_connection = .ansible/cached_facts -fact_caching_timeout = 900 -stdout_callback = skippy -callback_whitelist = profile_tasks -lookup_plugins = openshift-ansible/lookup_plugins - - -[ssh_connection] -ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no -control_path = /var/tmp/%%h-%%r -pipelining = True -- cgit v1.2.3 From dba6b457d86f7517c1c4f432784af06856960bc3 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 1 Nov 2017 11:36:55 +0100 Subject: Remove the static_inventory and bastion samples These options will have no effect until we add static inventory and bastion support back in. --- .../openstack/sample-inventory/group_vars/all.yml | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml index 83289307d..8ea798c14 100644 --- a/playbooks/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/openstack/sample-inventory/group_vars/all.yml @@ -133,25 +133,6 @@ ansible_user: openshift # # Use a single security group for a cluster (default: false) #openstack_flat_secgrp: false -# # Openstack inventory type and cluster nodes access pattern -# # Defaults to 'static'. -# # Use 'dynamic' to access cluster nodes directly, via floating IPs -# # and given a dynamic inventory script, like openstack.py -#openstack_inventory: static -# # The path to checkpoint the static inventory from the in-memory one -#openstack_inventory_path: ../../../../inventory - -# # Use bastion node to access cluster nodes (Defaults to False). -# # Requires a static inventory. -#openstack_use_bastion: False -#bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24" -# -# # The Nova key-pair's private SSH key to access inventory nodes -#openstack_private_ssh_key: ~/.ssh/openshift -# # The path for the SSH config to access all nodes -#openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.{{ env_id }}.{{ public_dns_domain }} - - # If you want to use the VM storage instead of Cinder volumes, set this to `true`. # NOTE: this is for testing only! Your data will be gone once the VM disappears! 
# ephemeral_volumes: false -- cgit v1.2.3 From fcf14943814ea1e5f31a967589c4269e722c0856 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 1 Nov 2017 11:43:08 +0100 Subject: FIXUP ANSIBLE CFG --- ansible.cfg | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible.cfg b/ansible.cfg index f96bf871f..4a79b843f 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -24,7 +24,8 @@ fact_caching_connection = $HOME/ansible/facts fact_caching_timeout = 600 callback_whitelist = profile_tasks inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt -timeout = 30 # work around privilege escalation timeouts in ansible +# work around privilege escalation timeouts in ansible: +timeout = 30 # Uncomment to use the provided BYO inventory #hostfile = inventory/byo/hosts -- cgit v1.2.3 From e34025f43b1a8b03b0a5e74bb1dfea946375dbf7 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 1 Nov 2017 11:52:25 +0100 Subject: Move the vars/main.yml to defaults The contents of roles/openshift_openstack/vars/main.yml were moved to the defaults/main.yml file instead. There are now duplication warnings we need to address, but the deployment does still work. --- roles/openshift_openstack/defaults/main.yml | 53 +++++++++++++++++++++++++++++ roles/openshift_openstack/vars/main.yml | 49 -------------------------- 2 files changed, 53 insertions(+), 49 deletions(-) delete mode 100644 roles/openshift_openstack/vars/main.yml diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 19e6e6f51..d1408abf0 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -53,3 +53,56 @@ container_root_lv_mount_path: "/var/lib/docker" dns_records_rm: [] dns_records_add: [] external_nsupdate_keys: {} + +full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' 
+ public_dns_domain) }}" +openshift_app_domain: "apps" + + + +# heat vars +stack_name: "{{ env_id }}.{{ public_dns_domain }}" +dns_domain: "{{ public_dns_domain }}" +dns_nameservers: "{{ public_dns_nameservers }}" +subnet_prefix: "{{ openstack_subnet_prefix }}" +master_hostname: "{{ openstack_master_hostname | default('master') }}" +infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" +node_hostname: "{{ openstack_node_hostname | default('app-node') }}" +lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" +etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" +dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" +ssh_public_key: "{{ openstack_ssh_public_key }}" +openstack_image: "{{ openstack_default_image_name }}" +lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" +etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" +master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" +node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" +infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" +dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" +openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" +openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" +openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" +openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" +openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" +openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" +openstack_private_network: >- + {% if openstack_provider_network_name | default(None) -%} + {{ openstack_provider_network_name }} + {%- else -%} + {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} + {%- endif -%} +provider_network: "{{ openstack_provider_network_name | default(None) }}" +external_network: "{{ openstack_external_network_name | default(None) }}" +num_etcd: "{{ openstack_num_etcd | default(0) }}" +num_masters: "{{ openstack_num_masters }}" +num_nodes: "{{ openstack_num_nodes }}" +num_infra: "{{ openstack_num_infra }}" +num_dns: "{{ openstack_num_dns | default(1) }}" +master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}" +infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" +master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" +infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" +node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" +etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}" +dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" +lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" +nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" diff --git a/roles/openshift_openstack/vars/main.yml b/roles/openshift_openstack/vars/main.yml deleted file mode 100644 index a4da31bfe..000000000 --- a/roles/openshift_openstack/vars/main.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -stack_name: "{{ env_id }}.{{ public_dns_domain }}" -dns_domain: "{{ public_dns_domain }}" -dns_nameservers: "{{ public_dns_nameservers }}" 
-subnet_prefix: "{{ openstack_subnet_prefix }}" -master_hostname: "{{ openstack_master_hostname | default('master') }}" -infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" -node_hostname: "{{ openstack_node_hostname | default('app-node') }}" -lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" -etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" -dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" -ssh_public_key: "{{ openstack_ssh_public_key }}" -openstack_image: "{{ openstack_default_image_name }}" -lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" -etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" -master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" -node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" -infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" -dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" -openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" -openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" -openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" -openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" -openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" -openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" -openstack_private_network: >- - {% if openstack_provider_network_name | default(None) -%} - {{ openstack_provider_network_name }} - {%- else -%} - {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} - {%- endif -%} -provider_network: "{{ openstack_provider_network_name | default(None) }}" -external_network: "{{ openstack_external_network_name | default(None) }}" -num_etcd: "{{ openstack_num_etcd | default(0) }}" -num_masters: "{{ openstack_num_masters }}" -num_nodes: "{{ openstack_num_nodes }}" -num_infra: "{{ openstack_num_infra }}" -num_dns: "{{ openstack_num_dns | default(1) }}" -master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}" -infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" -master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" -infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" -node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" -etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}" -dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" -lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" -nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" -use_bastion: "{{ openstack_use_bastion|default(False) }}" -ui_ssh_tunnel: "{{ openshift_ui_ssh_tunnel|default(False) }}" -- cgit v1.2.3 From 23674d565f2801d88060bd0443ec384fbdcdad59 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 1 Nov 2017 15:47:04 +0100 Subject: Remove the subnet_update_dns_servers task list It's no longer being used. 
--- roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml | 9 --------- 1 file changed, 9 deletions(-) delete mode 100644 roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml diff --git a/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml b/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml deleted file mode 100644 index af28fc98f..000000000 --- a/roles/openshift_openstack/tasks/subnet_update_dns_servers.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Live update the subnet's DNS servers - os_subnet: - name: openshift-ansible-{{ stack_name }}-subnet - network_name: openshift-ansible-{{ stack_name }}-net - state: present - use_default_subnetpool: yes - dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}" - when: not provider_network -- cgit v1.2.3 From 79f29bc825286c4f69073827a5b6d71f71f47c91 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Wed, 1 Nov 2017 16:43:13 +0100 Subject: Add the DNS updates and rename the openstack vars Most of the vars in `roles/openshift_openstack/defaults/main.yml` are now prefixed with `openstack_`. --- .gitignore | 1 - playbooks/openstack/README.md | 18 +- playbooks/openstack/advanced-configuration.md | 11 +- .../openstack/openshift-cluster/provision.yml | 36 +++- .../sample-inventory/group_vars/OSEv3.yml | 4 +- .../openstack/sample-inventory/group_vars/all.yml | 4 +- roles/openshift_openstack/defaults/main.yml | 91 +++++----- .../tasks/check-prerequisites.yml | 4 +- .../tasks/generate-templates.yml | 3 + roles/openshift_openstack/tasks/hostname.yml | 49 +++--- roles/openshift_openstack/tasks/populate-dns.yml | 187 ++++++++------------ roles/openshift_openstack/tasks/provision.yml | 5 - .../templates/heat_stack.yaml.j2 | 190 ++++++++++----------- .../templates/heat_stack_server.yaml.j2 | 14 +- 14 files changed, 284 insertions(+), 333 deletions(-) diff --git a/.gitignore b/.gitignore index e8be4ea5b..1e187db16 100644 --- a/.gitignore +++ b/.gitignore @@ -24,4 +24,3 @@ multi_ec2.yaml *.egg-info .eggs cover/ -roles/infra-ansible/ diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index 4347ddaa8..99f4ab12f 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -142,7 +142,7 @@ corresponding to your OpenStack installation. $ vi inventory/group_vars/all.yml ``` -1. Set the `openstack_ssh_public_key` to your OpenStack keypair name. +1. Set the `openstack_keypair_name` to your OpenStack keypair name. - See `openstack keypair list` to find the keypairs registered with OpenShift. - This must correspond to your private SSH key in `~/.ssh/id_rsa` @@ -156,20 +156,16 @@ $ vi inventory/group_vars/all.yml 4. Set the `openstack_default_flavor` to the flavor you want your OpenShift VMs to use. - See `openstack flavor list` for the list of available flavors. -5. Set the `public_dns_nameservers` to the list of the IP addresses - of the DNS servers used for the **private** address resolution[1]. +5. Set the `openstack_dns_nameservers` to the list of the IP addresses + of the DNS servers used for the **private** address resolution. -**NOTE**: In most OpenStack environments, you will also need to -configure the forwarders for the DNS server we create. This depends on -your environment. +**NOTE ON DNS**: at minimum, the OpenShift nodes need to be able to access each +other by their hostname. OpenStack doesn't provide this by default, so you +need to provide a DNS server. 
Put the address of that DNS server in +`openstack_dns_nameservers` variable. -Launch a VM in your OpenStack and look at its `/etc/resolv.conf` and -put the IP addresses into `public_dns_nameservers` in -`inventory/group_vars/all.yml`. -[1]: Yes, the name is bad. We will fix it. - #### OpenShift configuration diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md index 72bb95254..5ffec708a 100644 --- a/playbooks/openstack/advanced-configuration.md +++ b/playbooks/openstack/advanced-configuration.md @@ -192,11 +192,10 @@ The `openstack__hostname` is a set of variables used for customising hostnames of servers with a given role. When such a variable stays commented, default hostname (usually the role name) is used. -The `public_dns_nameservers` is a list of DNS servers accessible from all -the created Nova servers. These will be serving as your DNS forwarders for -external FQDNs that do not belong to the cluster's DNS domain and its subdomains. -If you're unsure what to put in here, you can try the google or opendns servers, -but note that some organizations may be blocking them. +The `openstack_dns_nameservers` is a list of DNS servers accessible from all +the created Nova servers. These will provide the internal name resolution for +your OpenShift nodes (as well as upstream name resolution for installing +packages, etc.). The `openshift_use_dnsmasq` controls either dnsmasq is deployed or not. By default, dnsmasq is deployed and comes as the hosts' /etc/resolv.conf file @@ -265,7 +264,7 @@ step for flannel and docker iptables configuration: ## Other configuration variables -`openstack_ssh_public_key` is a Nova keypair - you can see your +`openstack_keypair_name` is a Nova keypair - you can see your keypairs with `openstack keypair list`. It must correspond to the private SSH key Ansible will use to log into the created VMs. This is `~/.ssh/id_rsa` by default, but you can use a different key by passing diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml index ed44d4a32..b1dff1870 100644 --- a/playbooks/openstack/openshift-cluster/provision.yml +++ b/playbooks/openstack/openshift-cluster/provision.yml @@ -7,15 +7,17 @@ name: openshift_openstack tasks_from: provision.yml -# NOTE(shadower): the (internal) DNS must be functional at this point!! -# That will have happened in provision.yml if nsupdate was configured. -# TODO(shadower): consider splitting this up so people can stop here -# and configure their DNS if they have to. +# NOTE(shadower): Bring in the host groups: +- name: normalize groups + include: ../../byo/openshift-cluster/initialize_groups.yml +- name: evaluate groups + include: ../../common/openshift-cluster/evaluate_groups.yml -- name: Prepare the Nodes in the cluster for installation + +- name: Wait for the nodes and gather their facts hosts: oo_all_hosts - become: true + become: yes # NOTE: The nodes may not be up yet, don't gather facts here. # They'll be collected after `wait_for_connection`. gather_facts: no @@ -26,6 +28,28 @@ - name: Gather facts for the new nodes setup: + +# NOTE(shadower): the (internal) DNS must be functional at this point!! +# That will have happened in provision.yml if nsupdate was configured. + +# TODO(shadower): consider splitting this up so people can stop here +# and configure their DNS if they have to. 
+- name: Populate the DNS entries + hosts: localhost + tasks: + - name: Populate DNS entries + include_role: + name: openshift_openstack + tasks_from: populate-dns.yml + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys.private is defined or external_nsupdate_keys.public is defined + +- name: Prepare the Nodes in the cluster for installation + hosts: oo_all_hosts + become: yes + gather_facts: yes + tasks: - name: Install dependencies include_role: name: openshift_openstack diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml index 949a323a7..7d8dc157e 100644 --- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml @@ -5,8 +5,8 @@ openshift_deployment_type: origin openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" openshift_master_cluster_method: native -openshift_master_cluster_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" -openshift_master_cluster_public_hostname: "{{ groups.lb.0|default(groups.masters.0) }}" +openshift_master_cluster_hostname: "console.{{ env_id }}.{{ public_dns_domain }}" +openshift_master_cluster_public_hostname: "{{ openshift_master_cluster_hostname }}" osm_default_node_selector: 'region=primary' diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml index 8ea798c14..e0618d685 100644 --- a/playbooks/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/openstack/sample-inventory/group_vars/all.yml @@ -1,7 +1,7 @@ --- env_id: "openshift" public_dns_domain: "example.com" -public_dns_nameservers: [] +openstack_dns_nameservers: [] # # Used Hostnames # # - set custom hostnames for roles by uncommenting corresponding lines @@ -12,7 +12,7 @@ public_dns_nameservers: [] #openstack_etcd_hostname: "etcd" #openstack_dns_hostname: "dns" -openstack_ssh_public_key: "openshift" +openstack_keypair_name: "openshift" openstack_external_network_name: "public" #openstack_private_network_name: "openshift-ansible-{{ stack_name }}-net" # # A dedicated Neutron network name for containers data network diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index d1408abf0..aa03c088e 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -1,5 +1,4 @@ --- - stack_state: 'present' ssh_ingress_cidr: 0.0.0.0/0 @@ -7,18 +6,13 @@ node_ingress_cidr: 0.0.0.0/0 master_ingress_cidr: 0.0.0.0/0 lb_ingress_cidr: 0.0.0.0/0 bastion_ingress_cidr: 0.0.0.0/0 -num_etcd: 0 -num_masters: 1 -num_nodes: 1 -num_dns: 1 -num_infra: 1 -nodes_to_remove: [] -etcd_volume_size: 2 -dns_volume_size: 1 -lb_volume_size: 5 -use_bastion: False -ui_ssh_tunnel: False -provider_network: False +openstack_num_etcd: 0 +openstack_num_masters: 1 +openstack_num_nodes: 1 +openstack_num_dns: 0 +openstack_num_infra: 1 +openstack_dns_nameservers: [] +openstack_nodes_to_remove: [] openshift_cluster_node_labels: @@ -61,48 +55,41 @@ openshift_app_domain: "apps" # heat vars stack_name: "{{ env_id }}.{{ public_dns_domain }}" -dns_domain: "{{ public_dns_domain }}" -dns_nameservers: "{{ public_dns_nameservers }}" -subnet_prefix: "{{ openstack_subnet_prefix }}" -master_hostname: "{{ openstack_master_hostname | default('master') }}" -infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}" -node_hostname: "{{ openstack_node_hostname | 
default('app-node') }}" -lb_hostname: "{{ openstack_lb_hostname | default('lb') }}" -etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}" -dns_hostname: "{{ openstack_dns_hostname | default('dns') }}" -ssh_public_key: "{{ openstack_ssh_public_key }}" -openstack_image: "{{ openstack_default_image_name }}" -lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}" -etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}" -master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}" -node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}" -infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}" -dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}" -openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}" -openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}" -openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}" -openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}" -openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}" -openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}" +openstack_subnet_prefix: "192.168.99" +openstack_master_hostname: master +openstack_infra_hostname: infra-node +openstack_node_hostname: app-node +openstack_lb_hostname: lb +openstack_etcd_hostname: etcd +openstack_dns_hostname: dns +openstack_keypair_name: openshift +openstack_lb_flavor: "{{ openstack_default_flavor }}" +openstack_etcd_flavor: "{{ openstack_default_flavor }}" +openstack_master_flavor: "{{ openstack_default_flavor }}" +openstack_node_flavor: "{{ openstack_default_flavor }}" +openstack_infra_flavor: "{{ openstack_default_flavor }}" +openstack_dns_flavor: "{{ openstack_default_flavor }}" +openstack_master_image: "{{ openstack_default_image_name }}" +openstack_infra_image: "{{ openstack_default_image_name }}" +openstack_node_image: "{{ openstack_default_image_name }}" +openstack_lb_image: "{{ openstack_default_image_name }}" +openstack_etcd_image: "{{ openstack_default_image_name }}" +openstack_dns_image: "{{ openstack_default_image_name }}" +openstack_provider_network_name: False +openstack_external_network_name: False openstack_private_network: >- {% if openstack_provider_network_name | default(None) -%} {{ openstack_provider_network_name }} {%- else -%} {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} {%- endif -%} -provider_network: "{{ openstack_provider_network_name | default(None) }}" -external_network: "{{ openstack_external_network_name | default(None) }}" -num_etcd: "{{ openstack_num_etcd | default(0) }}" -num_masters: "{{ openstack_num_masters }}" -num_nodes: "{{ openstack_num_nodes }}" -num_infra: "{{ openstack_num_infra }}" -num_dns: "{{ openstack_num_dns | default(1) }}" -master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}" -infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}" -master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}" -infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}" -node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}" -etcd_volume_size: "{{ docker_etcd_volume_size | 
default('2') }}" -dns_volume_size: "{{ docker_dns_volume_size | default('1') }}" -lb_volume_size: "{{ docker_lb_volume_size | default('5') }}" -nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) | to_yaml }}" +openstack_master_server_group_policies: [] +openstack_infra_server_group_policies: [] +openstack_master_volume_size: "{{ docker_volume_size }}" +openstack_infra_volume_size: "{{ docker_volume_size }}" +openstack_node_volume_size: "{{ docker_volume_size }}" +openstack_etcd_volume_size: 2 +openstack_dns_volume_size: 1 +openstack_lb_volume_size: 5 +openstack_use_bastion: false +openshift_ui_ssh_tunnel: false diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml index 4d7cfbf11..13000e31f 100644 --- a/roles/openshift_openstack/tasks/check-prerequisites.yml +++ b/roles/openshift_openstack/tasks/check-prerequisites.yml @@ -76,13 +76,13 @@ - name: Try to show keypair command: > python -c 'import shade; cloud = shade.openstack_cloud(); - exit(cloud.get_keypair("{{ openstack_ssh_public_key }}") is None)' + exit(cloud.get_keypair("{{ openstack_keypair_name }}") is None)' ignore_errors: yes register: key_result - name: Check that keypair is available assert: that: 'key_result.rc == 0' - msg: "Keypair {{ openstack_ssh_public_key }} is not available" + msg: "Keypair {{ openstack_keypair_name }} is not available" # Check that custom images are available - include: custom_image_check.yaml diff --git a/roles/openshift_openstack/tasks/generate-templates.yml b/roles/openshift_openstack/tasks/generate-templates.yml index 0ff50a095..3a8b588e9 100644 --- a/roles/openshift_openstack/tasks/generate-templates.yml +++ b/roles/openshift_openstack/tasks/generate-templates.yml @@ -10,6 +10,9 @@ stack_template_path: "{{ stack_template_pre.path }}/stack.yaml" user_data_template_path: "{{ stack_template_pre.path }}/user-data" +- name: Print out the Heat template directory + debug: var=stack_template_pre + - name: generate HOT stack template from jinja2 template template: src: heat_stack.yaml.j2 diff --git a/roles/openshift_openstack/tasks/hostname.yml b/roles/openshift_openstack/tasks/hostname.yml index 0fc8fbc4c..9815d0e80 100644 --- a/roles/openshift_openstack/tasks/hostname.yml +++ b/roles/openshift_openstack/tasks/hostname.yml @@ -1,33 +1,26 @@ --- -- name: "Verify hostname" - command: hostnamectl status --static - register: hostname_fqdn +- name: Setting Hostname Fact + set_fact: + new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" -- name: "Set hostname if required" - when: hostname_fqdn.stdout != ansible_fqdn - block: - - name: Setting Hostname Fact - set_fact: - new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" +- name: Setting FQDN Fact + set_fact: + new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" - - name: Setting FQDN Fact - set_fact: - new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" +- name: Setting hostname and DNS domain + hostname: name="{{ new_fqdn }}" - - name: Setting hostname and DNS domain - hostname: name="{{ new_fqdn }}" +- name: Check for cloud.cfg + stat: path=/etc/cloud/cloud.cfg + register: cloud_cfg - - name: Check for cloud.cfg - stat: path=/etc/cloud/cloud.cfg - register: cloud_cfg - - - name: Prevent cloud-init updates of hostname/fqdn (if applicable) - lineinfile: - dest: /etc/cloud/cloud.cfg - state: present - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - with_items: - - { regexp: '^ - set_hostname', line: '# - set_hostname' } - - { 
regexp: '^ - update_hostname', line: '# - update_hostname' } - when: cloud_cfg.stat.exists == True +- name: Prevent cloud-init updates of hostname/fqdn (if applicable) + lineinfile: + dest: /etc/cloud/cloud.cfg + state: present + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ - set_hostname', line: '# - set_hostname' } + - { regexp: '^ - update_hostname', line: '# - update_hostname' } + when: cloud_cfg.stat.exists == True diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index c8243dc1f..669b65a01 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -1,37 +1,26 @@ -# TODO: use nsupdate to populate the DNS servers using the keys -# specified in the inventory. - -# this is an optional step -- the deployers may do whatever else they -# wish here. - - -# TODO: build records -# TODO: run nsupdate - - - name: "Generate list of private A records" set_fact: - private_records: "{{ [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" with_items: "{{ groups['cluster_hosts'] }}" -# - name: "Add wildcard records to the private A records for infrahosts" -# set_fact: -# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" -# with_items: "{{ groups['infra_hosts'] }}" - -# - name: "Add public master cluster hostname records to the private A records (single master)" -# set_fact: -# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" -# when: -# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined -# - openstack_num_masters == 1 - -# - name: "Add public master cluster hostname records to the private A records (multi-master)" -# set_fact: -# private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" -# when: -# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined -# - openstack_num_masters > 1 +- name: "Add wildcard records to the private A records for infrahosts" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + +- name: "Add public master cluster hostname records to the private A records (single master)" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + +- name: "Add public master cluster hostname records to the private A records (multi-master)" + set_fact: + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters > 1 - name: "Set the private DNS server to use the external value (if provided)" set_fact: @@ -55,102 +44,67 @@ key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" entries: "{{ private_records }}" -# - name: "Generate list of public A records" -# set_fact: -# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" -# with_items: "{{ groups['cluster_hosts'] }}" -# when: hostvars[item]['public_v4'] is defined - -# - name: "Add wildcard records to the public A records" -# set_fact: -# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" -# with_items: "{{ groups['infra_hosts'] }}" -# when: hostvars[item]['public_v4'] is defined - -# - name: "Add public master cluster hostname records to the public A records (single master)" -# set_fact: -# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" -# when: -# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined -# - openstack_num_masters == 1 -# - not use_bastion|bool - -# - name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" -# set_fact: -# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" -# when: -# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined -# - openstack_num_masters == 1 -# - use_bastion|bool - -# - name: "Add public master cluster hostname records to the public A records (multi-master)" -# set_fact: -# public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" -# when: -# - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined -# - openstack_num_masters > 1 - -# - name: "Set the public DNS server details to use the external value (if provided)" -# set_fact: -# nsupdate_server_public: "{{ 
external_nsupdate_keys['public']['server'] }}" -# nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" -# nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" -# nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" -# when: -# - external_nsupdate_keys is defined -# - external_nsupdate_keys['public'] is defined - -# - name: "Set the public DNS server details to use the provisioned value" -# set_fact: -# nsupdate_server_public: "{{ hostvars[groups['dns'][0]].public_v4 }}" -# nsupdate_key_secret_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_secret }}" -# nsupdate_key_algorithm_public: "{{ hostvars[groups['dns'][0]].nsupdate_keys['public-' + full_dns_domain].key_algorithm }}" -# when: -# - nsupdate_server_public is undefined - -# - name: "Generate the public Add section for DNS" -# set_fact: -# public_named_records: -# - view: "public" -# zone: "{{ full_dns_domain }}" -# server: "{{ nsupdate_server_public }}" -# key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" -# key_secret: "{{ nsupdate_key_secret_public }}" -# key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" -# entries: "{{ public_records }}" - +- name: "Generate list of public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" + with_items: "{{ groups['cluster_hosts'] }}" + when: hostvars[item]['public_v4'] is defined +- name: "Add wildcard records to the public A records" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" + with_items: "{{ groups['infra_hosts'] }}" + when: hostvars[item]['public_v4'] is defined +- name: "Add public master cluster hostname records to the public A records (single master)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + - not openstack_use_bastion|bool +- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters == 1 + - openstack_use_bastion|bool +- name: "Add public master cluster hostname records to the public A records (multi-master)" + set_fact: + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" + when: + - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined + - openstack_num_masters > 1 -- name: "Generate the final dns_records_add" +- name: "Set the public DNS server details to use the external value (if provided)" set_fact: - # TODO(shadower): enable this when we add public records - #dns_records_add: "{{ private_named_records + public_named_records }}" - dns_records_add: "{{ private_named_records }}" + nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" + nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" + nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" + nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" + when: + - external_nsupdate_keys is defined + - external_nsupdate_keys['public'] is defined +- name: "Generate the public Add section for DNS" + set_fact: + public_named_records: + - view: "public" + zone: "{{ full_dns_domain }}" + server: "{{ nsupdate_server_public }}" + key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" + key_secret: "{{ nsupdate_key_secret_public }}" + key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" + entries: "{{ public_records }}" -# RUN NSUPDATE +- name: "Generate the final dns_records_add" + set_fact: + dns_records_add: "{{ private_named_records + public_named_records }}" -- name: "Remove any deleted DNS A records" - nsupdate: - key_name: "{{ item.0.key_name }}" - key_secret: "{{ item.0.key_secret }}" - key_algorithm: "{{ item.0.key_algorithm }}" - server: "{{ item.0.server }}" - zone: "{{ item.0.zone }}" - record: "{{ item.1.hostname }}" - type: "{{ item.1.type }}" - state: absent - with_subelements: - - "{{ dns_records_rm | default({}) }}" - - entries - register: nsupdate_remove_result - until: nsupdate_remove_result|succeeded - retries: 10 - delay: 1 - name: "Add DNS A records" nsupdate: @@ -162,6 +116,7 @@ record: "{{ 
item.1.hostname }}" value: "{{ item.1.ip }}" type: "{{ item.1.type }}" + # TODO(shadower): add a cleanup playbook that removes these records, too! state: present with_subelements: - "{{ dns_records_add | default({}) }}" diff --git a/roles/openshift_openstack/tasks/provision.yml b/roles/openshift_openstack/tasks/provision.yml index 8ebda8100..e693f535a 100644 --- a/roles/openshift_openstack/tasks/provision.yml +++ b/roles/openshift_openstack/tasks/provision.yml @@ -16,11 +16,6 @@ - name: Add the new nodes to the inventory meta: refresh_inventory -- name: Populate DNS entries - include: populate-dns.yml - when: - - stack_state == 'present' - - name: CleanUp include: cleanup.yml when: diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 index 2359842a5..28634f9a4 100644 --- a/roles/openshift_openstack/templates/heat_stack.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -54,7 +54,7 @@ outputs: description: Floating IPs of the nodes value: { get_attr: [ infra_nodes, floating_ip ] } -{% if num_dns|int > 0 %} +{% if openstack_num_dns|int > 0 %} dns_name: description: Name of the DNS value: @@ -72,11 +72,11 @@ outputs: {% endif %} conditions: - no_floating: {% if provider_network or use_bastion|bool %}true{% else %}false{% endif %} + no_floating: {% if openstack_provider_network_name or openstack_use_bastion|bool %}true{% else %}false{% endif %} resources: -{% if not provider_network %} +{% if not openstack_provider_network_name %} net: type: OS::Neutron::Net properties: @@ -99,20 +99,20 @@ resources: str_replace: template: subnet_24_prefix.0/24 params: - subnet_24_prefix: {{ subnet_prefix }} + subnet_24_prefix: {{ openstack_subnet_prefix }} allocation_pools: - start: str_replace: template: subnet_24_prefix.3 params: - subnet_24_prefix: {{ subnet_prefix }} + subnet_24_prefix: {{ openstack_subnet_prefix }} end: str_replace: template: subnet_24_prefix.254 params: - subnet_24_prefix: {{ subnet_prefix }} + subnet_24_prefix: {{ openstack_subnet_prefix }} dns_nameservers: -{% for nameserver in dns_nameservers %} +{% for nameserver in openstack_dns_nameservers %} - {{ nameserver }} {% endfor %} @@ -141,7 +141,7 @@ resources: params: cluster_id: {{ stack_name }} external_gateway_info: - network: {{ external_network }} + network: {{ openstack_external_network_name }} interface: type: OS::Neutron::RouterInterface @@ -159,7 +159,7 @@ resources: # template: openshift-ansible-cluster_id-keypair # params: # cluster_id: {{ stack_name }} -# public_key: {{ ssh_public_key }} +# public_key: {{ openstack_keypair_name }} common-secgrp: type: OS::Neutron::SecurityGroup @@ -180,7 +180,7 @@ resources: port_range_min: 22 port_range_max: 22 remote_ip_prefix: {{ ssh_ingress_cidr }} -{% if use_bastion|bool %} +{% if openstack_use_bastion|bool %} - direction: ingress protocol: tcp port_range_min: 22 @@ -443,7 +443,7 @@ resources: port_range_min: 443 port_range_max: 443 -{% if num_dns|int > 0 %} +{% if openstack_num_dns|int > 0 %} dns-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -480,7 +480,7 @@ resources: remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" {% endif %} -{% if num_masters|int > 1 or ui_ssh_tunnel|bool %} +{% if openstack_num_masters|int > 1 or openshift_ui_ssh_tunnel|bool %} lb-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -492,7 +492,7 @@ resources: port_range_min: {{ openshift_master_api_port | default(8443) }} port_range_max: {{ openshift_master_api_port | default(8443) }} remote_ip_prefix: 
{{ lb_ingress_cidr | default(bastion_ingress_cidr) }} -{% if ui_ssh_tunnel|bool %} +{% if openshift_ui_ssh_tunnel|bool %} - direction: ingress protocol: tcp port_range_min: {{ openshift_master_api_port | default(8443) }} @@ -511,7 +511,7 @@ resources: etcd: type: OS::Heat::ResourceGroup properties: - count: {{ num_etcd }} + count: {{ openstack_num_etcd }} resource_def: type: server.yaml properties: @@ -520,7 +520,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ etcd_hostname | default('etcd') }} + k8s_type: {{ openstack_etcd_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -530,12 +530,12 @@ resources: k8s_type: etcds cluster_id: {{ stack_name }} type: etcd - image: {{ openstack_etcd_image | default(openstack_image) }} - flavor: {{ etcd_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_etcd_image }} + flavor: {{ openstack_etcd_flavor }} + key_name: {{ openstack_keypair_name }} +{% if openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -552,31 +552,31 @@ resources: if: - no_floating - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} + - {{ openstack_external_network_name }} +{% if openstack_use_bastion|bool or openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ etcd_volume_size }} -{% if not provider_network %} + volume_size: {{ openstack_etcd_volume_size }} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} -{% if master_server_group_policies|length > 0 %} +{% if openstack_master_server_group_policies|length > 0 %} master_server_group: type: OS::Nova::ServerGroup properties: name: master_server_group - policies: {{ master_server_group_policies }} + policies: {{ openstack_master_server_group_policies }} {% endif %} -{% if infra_server_group_policies|length > 0 %} +{% if openstack_infra_server_group_policies|length > 0 %} infra_server_group: type: OS::Nova::ServerGroup properties: name: infra_server_group - policies: {{ infra_server_group_policies }} + policies: {{ openstack_infra_server_group_policies }} {% endif %} -{% if num_masters|int > 1 %} +{% if openstack_num_masters|int > 1 %} loadbalancer: type: OS::Heat::ResourceGroup properties: @@ -589,7 +589,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ lb_hostname | default('lb') }} + k8s_type: {{ openstack_lb_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -599,12 +599,12 @@ resources: k8s_type: lb cluster_id: {{ stack_name }} type: lb - image: {{ openstack_lb_image | default(openstack_image) }} - flavor: {{ lb_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_lb_image }} + flavor: {{ openstack_lb_flavor }} + key_name: {{ openstack_keypair_name }} +{% if openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -617,11 +617,11 @@ resources: secgrp: - { get_resource: lb-secgrp } - { get_resource: common-secgrp } -{% if not provider_network %} - 
floating_network: {{ external_network }} +{% if not openstack_provider_network_name %} + floating_network: {{ openstack_external_network_name }} {% endif %} - volume_size: {{ lb_volume_size }} -{% if not provider_network %} + volume_size: {{ openstack_lb_volume_size }} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -630,7 +630,7 @@ resources: masters: type: OS::Heat::ResourceGroup properties: - count: {{ num_masters }} + count: {{ openstack_num_masters }} resource_def: type: server.yaml properties: @@ -639,7 +639,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ master_hostname | default('master')}} + k8s_type: {{ openstack_master_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -649,12 +649,12 @@ resources: k8s_type: masters cluster_id: {{ stack_name }} type: master - image: {{ openstack_master_image | default(openstack_image) }} - flavor: {{ master_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_master_image }} + flavor: {{ openstack_master_flavor }} + key_name: {{ openstack_keypair_name }} +{% if openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -675,7 +675,7 @@ resources: {% else %} - { get_resource: master-secgrp } - { get_resource: node-secgrp } -{% if num_etcd|int == 0 %} +{% if openstack_num_etcd|int == 0 %} - { get_resource: etcd-secgrp } {% endif %} {% endif %} @@ -684,16 +684,16 @@ resources: if: - no_floating - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} + - {{ openstack_external_network_name }} +{% if openstack_use_bastion|bool or openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ master_volume_size }} -{% if master_server_group_policies|length > 0 %} + volume_size: {{ openstack_master_volume_size }} +{% if openstack_master_server_group_policies|length > 0 %} scheduler_hints: group: { get_resource: master_server_group } {% endif %} -{% if not provider_network %} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -701,9 +701,9 @@ resources: compute_nodes: type: OS::Heat::ResourceGroup properties: - count: {{ num_nodes }} + count: {{ openstack_num_nodes }} removal_policies: - - resource_list: {{ nodes_to_remove }} + - resource_list: {{ openstack_nodes_to_remove }} resource_def: type: server.yaml properties: @@ -712,7 +712,7 @@ resources: template: sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ node_hostname | default('app-node') }} + sub_type_k8s_type: {{ openstack_node_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -727,12 +727,12 @@ resources: {% for k, v in openshift_cluster_node_labels.app.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_node_image | default(openstack_image) }} - flavor: {{ node_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_node_image }} + flavor: {{ openstack_node_flavor }} + key_name: {{ openstack_keypair_name }} +{% if openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ 
openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -754,12 +754,12 @@ resources: if: - no_floating - null - - {{ external_network }} -{% if use_bastion|bool or provider_network %} + - {{ openstack_external_network_name }} +{% if openstack_use_bastion|bool or openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ node_volume_size }} -{% if not provider_network %} + volume_size: {{ openstack_node_volume_size }} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -767,7 +767,7 @@ resources: infra_nodes: type: OS::Heat::ResourceGroup properties: - count: {{ num_infra }} + count: {{ openstack_num_infra }} resource_def: type: server.yaml properties: @@ -776,7 +776,7 @@ resources: template: sub_type_k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ infra_hostname | default('infranode') }} + sub_type_k8s_type: {{ openstack_infra_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -791,12 +791,12 @@ resources: {% for k, v in openshift_cluster_node_labels.infra.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_infra_image | default(openstack_image) }} - flavor: {{ infra_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_infra_image }} + flavor: {{ openstack_infra_flavor }} + key_name: {{ openstack_keypair_name }} +{% if openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -818,29 +818,29 @@ resources: {% else %} - { get_resource: node-secgrp } {% endif %} -{% if ui_ssh_tunnel|bool and num_masters|int < 2 %} +{% if openshift_ui_ssh_tunnel|bool and openstack_num_masters|int < 2 %} - { get_resource: lb-secgrp } {% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } -{% if not provider_network %} - floating_network: {{ external_network }} +{% if not openstack_provider_network_name %} + floating_network: {{ openstack_external_network_name }} {% endif %} - volume_size: {{ infra_volume_size }} -{% if infra_server_group_policies|length > 0 %} + volume_size: {{ openstack_infra_volume_size }} +{% if openstack_infra_server_group_policies|length > 0 %} scheduler_hints: group: { get_resource: infra_server_group } {% endif %} -{% if not provider_network %} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} -{% if num_dns|int > 0 %} +{% if openstack_num_dns|int > 0 %} dns: type: OS::Heat::ResourceGroup properties: - count: {{ num_dns }} + count: {{ openstack_num_dns }} resource_def: type: server.yaml properties: @@ -849,7 +849,7 @@ resources: template: k8s_type-%index%.cluster_id params: cluster_id: {{ stack_name }} - k8s_type: {{ dns_hostname | default('dns') }} + k8s_type: {{ openstack_dns_hostname }} cluster_env: {{ public_dns_domain }} cluster_id: {{ stack_name }} group: @@ -859,12 +859,12 @@ resources: k8s_type: dns cluster_id: {{ stack_name }} type: dns - image: {{ openstack_dns_image | default(openstack_image) }} - flavor: {{ dns_flavor }} - key_name: {{ ssh_public_key }} -{% if provider_network %} - net: {{ provider_network }} - net_name: {{ provider_network }} + image: {{ openstack_dns_image }} + flavor: {{ openstack_dns_flavor }} + key_name: {{ openstack_keypair_name }} +{% if 
openstack_provider_network_name %} + net: {{ openstack_provider_network_name }} + net_name: {{ openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -877,11 +877,11 @@ resources: secgrp: - { get_resource: dns-secgrp } - { get_resource: common-secgrp } -{% if not provider_network %} - floating_network: {{ external_network }} +{% if not openstack_provider_network_name %} + floating_network: {{ openstack_external_network_name }} {% endif %} - volume_size: {{ dns_volume_size }} -{% if not provider_network %} + volume_size: {{ openstack_dns_volume_size }} +{% if not openstack_provider_network_name %} depends_on: - interface {% endif %} diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 index 9ffe721a5..160345baf 100644 --- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 @@ -61,7 +61,7 @@ parameters: label: Net name description: Net name -{% if not provider_network %} +{% if not openstack_provider_network_name %} subnet: type: string label: Subnet ID @@ -81,7 +81,7 @@ parameters: label: Net ID description: Net resource -{% if not provider_network %} +{% if not openstack_provider_network_name %} data_subnet: type: string default: '' @@ -102,7 +102,7 @@ parameters: label: Attach-float-net description: A switch for floating network port connection -{% if not provider_network %} +{% if not openstack_provider_network_name %} floating_network: type: string default: '' @@ -156,7 +156,7 @@ outputs: - server - addresses - { get_param: net_name } -{% if provider_network %} +{% if openstack_provider_network_name %} - 0 {% else %} - 1 @@ -226,7 +226,7 @@ resources: type: OS::Neutron::Port properties: network: { get_param: net } -{% if not provider_network %} +{% if not openstack_provider_network_name %} fixed_ips: - subnet: { get_param: subnet } {% endif %} @@ -239,13 +239,13 @@ resources: properties: network: { get_param: data_net } port_security_enabled: false -{% if not provider_network %} +{% if not openstack_provider_network_name %} fixed_ips: - subnet: { get_param: data_subnet } {% endif %} {% endif %} -{% if not provider_network %} +{% if not openstack_provider_network_name %} floating-ip: condition: { not: no_floating } type: OS::Neutron::FloatingIP -- cgit v1.2.3 From f462e7a682cb65085864d7eff4b7898fe8555a75 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 10:07:08 +0100 Subject: Move the selinux check up --- roles/openshift_openstack/tasks/node-configuration.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/openshift_openstack/tasks/node-configuration.yml b/roles/openshift_openstack/tasks/node-configuration.yml index 8a6a8022f..89e58d830 100644 --- a/roles/openshift_openstack/tasks/node-configuration.yml +++ b/roles/openshift_openstack/tasks/node-configuration.yml @@ -1,11 +1,11 @@ --- +- name: "Verify SELinux is enforcing" + fail: + msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" + when: ansible_selinux.config_mode != "enforcing" + - include: hostname.yml - include: container-storage-setup.yml - include: node-network.yml - -- name: "Verify SELinux is enforcing" - fail: - msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" - when: ansible_selinux.config_mode != "enforcing" -- cgit v1.2.3 From 
bde35d577f4ccb786a65a84142fabe90eb903599 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 10:15:42 +0100 Subject: Use the default `item` loop variable for checks --- roles/openshift_openstack/tasks/check-prerequisites.yml | 16 ++++++---------- roles/openshift_openstack/tasks/custom_flavor_check.yaml | 5 +++-- roles/openshift_openstack/tasks/custom_image_check.yaml | 4 ++-- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml index 13000e31f..a91e60640 100644 --- a/roles/openshift_openstack/tasks/check-prerequisites.yml +++ b/roles/openshift_openstack/tasks/check-prerequisites.yml @@ -93,17 +93,13 @@ - "{{ openstack_lb_image }}" - "{{ openstack_etcd_image }}" - "{{ openstack_dns_image }}" - loop_control: - loop_var: image # Check that custom flavors are available - include: custom_flavor_check.yaml with_items: - - "{{ master_flavor }}" - - "{{ infra_flavor }}" - - "{{ node_flavor }}" - - "{{ lb_flavor }}" - - "{{ etcd_flavor }}" - - "{{ dns_flavor }}" - loop_control: - loop_var: flavor + - "{{ openstack_master_flavor }}" + - "{{ openstack_infra_flavor }}" + - "{{ openstack_node_flavor }}" + - "{{ openstack_lb_flavor }}" + - "{{ openstack_etcd_flavor }}" + - "{{ openstack_dns_flavor }}" diff --git a/roles/openshift_openstack/tasks/custom_flavor_check.yaml b/roles/openshift_openstack/tasks/custom_flavor_check.yaml index e11874c28..5fb7a76ff 100644 --- a/roles/openshift_openstack/tasks/custom_flavor_check.yaml +++ b/roles/openshift_openstack/tasks/custom_flavor_check.yaml @@ -1,9 +1,10 @@ --- - name: Try to get flavor facts os_flavor_facts: - name: "{{ flavor }}" + name: "{{ item }}" register: flavor_result + - name: Check that custom flavor is available assert: that: "flavor_result.ansible_facts.openstack_flavors" - msg: "Flavor {{ flavor }} is not available." + msg: "Flavor {{ item }} is not available." diff --git a/roles/openshift_openstack/tasks/custom_image_check.yaml b/roles/openshift_openstack/tasks/custom_image_check.yaml index 4fbd6a687..4ae163406 100644 --- a/roles/openshift_openstack/tasks/custom_image_check.yaml +++ b/roles/openshift_openstack/tasks/custom_image_check.yaml @@ -1,10 +1,10 @@ --- - name: Try to get image facts os_image_facts: - image: "{{ image }}" + image: "{{ item }}" register: image_result - name: Check that custom image is available assert: that: "image_result.ansible_facts.openstack_image" - msg: "Image {{ image }} is not available." + msg: "Image {{ item }} is not available." -- cgit v1.2.3 From 4fd33e96eed4d1d5eaca0af8f2ef3e81fcaf5498 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 10:44:41 +0100 Subject: Simplify the template paths for the storage setup Because the templates are present in a role, the `template` module is able to look them up directly, without having to use `{{ role_path }}/templates`. 
--- roles/openshift_openstack/tasks/container-storage-setup.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/openshift_openstack/tasks/container-storage-setup.yml b/roles/openshift_openstack/tasks/container-storage-setup.yml index 5cd48ca2c..82307b208 100644 --- a/roles/openshift_openstack/tasks/container-storage-setup.yml +++ b/roles/openshift_openstack/tasks/container-storage-setup.yml @@ -2,7 +2,7 @@ - block: - name: create the docker-storage config file template: - src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2" + src: docker-storage-setup-overlayfs.j2 dest: /etc/sysconfig/docker-storage-setup owner: root group: root @@ -14,7 +14,7 @@ - block: - name: create the docker-storage-setup config file template: - src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + src: docker-storage-setup-dm.j2 dest: /etc/sysconfig/docker-storage-setup owner: root group: root @@ -26,7 +26,7 @@ - block: - name: create the docker-storage-setup config file for CentOS template: - src: "{{ role_path }}/templates/docker-storage-setup-dm.j2" + src: docker-storage-setup-dm.j2 dest: /etc/sysconfig/docker-storage-setup owner: root group: root -- cgit v1.2.3 From ad84935b5021da5ab0d21ffdf630079c1a59083d Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 10:52:30 +0100 Subject: Use `null` instead of `False` where it makes sense The `openstack_*_network_name` vars are strings, not booleans, so the absense shouldn't really be marked by `False`. --- roles/openshift_openstack/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index aa03c088e..1f9c09c96 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -75,8 +75,8 @@ openstack_node_image: "{{ openstack_default_image_name }}" openstack_lb_image: "{{ openstack_default_image_name }}" openstack_etcd_image: "{{ openstack_default_image_name }}" openstack_dns_image: "{{ openstack_default_image_name }}" -openstack_provider_network_name: False -openstack_external_network_name: False +openstack_provider_network_name: null +openstack_external_network_name: null openstack_private_network: >- {% if openstack_provider_network_name | default(None) -%} {{ openstack_provider_network_name }} -- cgit v1.2.3 From b95170503613bb97c00175324b31ed91f6f41ea1 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 11:03:39 +0100 Subject: Namespace the OpenStack vars This makes sure that all the variables used in the `openshift_openstack` role are prefixed with `openshift_openstack_` as is the convention. 
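As a small illustration of the convention described above, an inventory excerpt under the new names might look like the sketch below; the variables are taken from the sample inventory changed in this patch, but the values are placeholders, not recommended settings.

```yaml
# inventory/group_vars/all.yml -- illustrative excerpt only
openshift_openstack_keypair_name: "openshift"         # formerly openstack_keypair_name
openshift_openstack_external_network_name: "public"   # formerly openstack_external_network_name
openshift_openstack_default_image_name: "centos7"     # formerly openstack_default_image_name
openshift_openstack_default_flavor: "m1.medium"       # formerly openstack_default_flavor
openshift_openstack_num_masters: 1                    # formerly openstack_num_masters
```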
--- playbooks/openstack/README.md | 12 +- playbooks/openstack/advanced-configuration.md | 80 ++--- .../openstack/openshift-cluster/provision.yml | 4 +- .../sample-inventory/group_vars/OSEv3.yml | 10 +- .../openstack/sample-inventory/group_vars/all.yml | 102 +++---- roles/openshift_openstack/defaults/main.yml | 132 ++++---- .../tasks/check-prerequisites.yml | 40 +-- roles/openshift_openstack/tasks/hostname.yml | 2 +- .../openshift_openstack/tasks/net_vars_check.yaml | 2 +- roles/openshift_openstack/tasks/node-packages.yml | 6 +- roles/openshift_openstack/tasks/populate-dns.yml | 66 ++-- roles/openshift_openstack/tasks/provision.yml | 8 +- .../templates/docker-storage-setup-dm.j2 | 8 +- .../templates/docker-storage-setup-overlayfs.j2 | 10 +- .../templates/heat_stack.yaml.j2 | 336 ++++++++++----------- .../templates/heat_stack_server.yaml.j2 | 16 +- 16 files changed, 418 insertions(+), 416 deletions(-) diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index 99f4ab12f..f3fe13530 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -142,27 +142,27 @@ corresponding to your OpenStack installation. $ vi inventory/group_vars/all.yml ``` -1. Set the `openstack_keypair_name` to your OpenStack keypair name. +1. Set the `openshift_openstack_keypair_name` to your OpenStack keypair name. - See `openstack keypair list` to find the keypairs registered with OpenShift. - This must correspond to your private SSH key in `~/.ssh/id_rsa` -2. Set the `openstack_external_network_name` to the floating IP +2. Set the `openshift_openstack_external_network_name` to the floating IP network of your openstack. - See `openstack network list` for the list of networks. - It's often called `public`, `external` or `ext-net`. -3. Set the `openstack_default_image_name` to the image you want your +3. Set the `openshift_openstack_default_image_name` to the image you want your OpenShift VMs to run. - See `openstack image list` for the list of available images. -4. Set the `openstack_default_flavor` to the flavor you want your +4. Set the `openshift_openstack_default_flavor` to the flavor you want your OpenShift VMs to use. - See `openstack flavor list` for the list of available flavors. -5. Set the `openstack_dns_nameservers` to the list of the IP addresses +5. Set the `openshift_openstack_dns_nameservers` to the list of the IP addresses of the DNS servers used for the **private** address resolution. **NOTE ON DNS**: at minimum, the OpenShift nodes need to be able to access each other by their hostname. OpenStack doesn't provide this by default, so you need to provide a DNS server. Put the address of that DNS server in -`openstack_dns_nameservers` variable. +`openshift_openstack_dns_nameservers` variable. diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md index 5ffec708a..90cc20b98 100644 --- a/playbooks/openstack/advanced-configuration.md +++ b/playbooks/openstack/advanced-configuration.md @@ -182,17 +182,17 @@ So the provisioned cluster nodes will start using those natively as default nameservers. Technically, this allows to deploy OpenShift clusters without dnsmasq proxies. -The `env_id` and `public_dns_domain` will form the cluster's DNS domain all +The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain` will form the cluster's DNS domain all your servers will be under. With the default values, this will be `openshift.example.com`. For workloads, the default subdomain is 'apps'. 
-That sudomain can be set as well by the `openshift_app_domain` variable in +That sudomain can be set as well by the `openshift_openstack_app_subdomain` variable in the inventory. The `openstack__hostname` is a set of variables used for customising hostnames of servers with a given role. When such a variable stays commented, default hostname (usually the role name) is used. -The `openstack_dns_nameservers` is a list of DNS servers accessible from all +The `openshift_openstack_dns_nameservers` is a list of DNS servers accessible from all the created Nova servers. These will provide the internal name resolution for your OpenShift nodes (as well as upstream name resolution for installing packages, etc.). @@ -204,10 +204,10 @@ daemon that in turn proxies DNS requests to the authoritative DNS server. When Network Manager is enabled for provisioned cluster nodes, which is normally the case, you should not change the defaults and always deploy dnsmasq. -`external_nsupdate_keys` describes an external authoritative DNS server(s) +`openshift_openstack_external_nsupdate_keys` describes an external authoritative DNS server(s) processing dynamic records updates in the public and private cluster views: - external_nsupdate_keys: + openshift_openstack_external_nsupdate_keys: public: key_secret: key_algorithm: 'hmac-md5' @@ -227,7 +227,7 @@ another external DNS server. Another example defines an external DNS server for the public view additionally to the in-stack DNS server used for the private view only: - external_nsupdate_keys: + openshift_openstack_external_nsupdate_keys: public: key_secret: key_algorithm: 'hmac-sha256' @@ -264,51 +264,51 @@ step for flannel and docker iptables configuration: ## Other configuration variables -`openstack_keypair_name` is a Nova keypair - you can see your +`openshift_openstack_keypair_name` is a Nova keypair - you can see your keypairs with `openstack keypair list`. It must correspond to the private SSH key Ansible will use to log into the created VMs. This is `~/.ssh/id_rsa` by default, but you can use a different key by passing `--private-key` to `ansible-playbook`. -`openstack_default_image_name` is the default name of the Glance image the +`openshift_openstack_default_image_name` is the default name of the Glance image the servers will use. You can see your images with `openstack image list`. In order to set a different image for a role, uncomment the line with the -corresponding variable (e.g. `openstack_lb_image_name` for load balancer) and -set its value to another available image name. `openstack_default_image_name` +corresponding variable (e.g. `openshift_openstack_lb_image_name` for load balancer) and +set its value to another available image name. `openshift_openstack_default_image_name` must stay defined as it is used as a default value for the rest of the roles. -`openstack_default_flavor` is the default Nova flavor the servers will use. +`openshift_openstack_default_flavor` is the default Nova flavor the servers will use. You can see your flavors with `openstack flavor list`. In order to set a different flavor for a role, uncomment the line with the -corresponding variable (e.g. `openstack_lb_flavor` for load balancer) and -set its value to another available flavor. `openstack_default_flavor` must +corresponding variable (e.g. `openshift_openstack_lb_flavor` for load balancer) and +set its value to another available flavor. `openshift_openstack_default_flavor` must stay defined as it is used as a default value for the rest of the roles. 
-`openstack_external_network_name` is the name of the Neutron network +`openshift_openstack_external_network_name` is the name of the Neutron network providing external connectivity. It is often called `public`, `external` or `ext-net`. You can see your networks with `openstack network list`. -`openstack_private_network_name` is the name of the private Neutron network +`openshift_openstack_private_network_name` is the name of the private Neutron network providing admin/control access for ansible. It can be merged with other cluster networks, there are no special requirements for networking. -The `openstack_num_masters`, `openstack_num_infra` and -`openstack_num_nodes` values specify the number of Master, Infra and +The `openshift_openstack_num_masters`, `openshift_openstack_num_infra` and +`openshift_openstack_num_nodes` values specify the number of Master, Infra and App nodes to create. -The `openshift_cluster_node_labels` defines custom labels for your openshift +The `openshift_openstack_cluster_node_labels` defines custom labels for your openshift cluster node groups. It currently supports app and infra node groups. The default value of this variable sets `region: primary` to app nodes and `region: infra` to infra nodes. An example of setting a customised label: ``` -openshift_cluster_node_labels: +openshift_openstack_cluster_node_labels: app: mylabel: myvalue ``` -The `openstack_nodes_to_remove` allows you to specify the numerical indexes +The `openshift_openstack_nodes_to_remove` allows you to specify the numerical indexes of App nodes that should be removed; for example, ['0', '2'], The `docker_volume_size` is the default Docker volume size the servers will use. @@ -318,15 +318,15 @@ for master) and change its value. `docker_volume_size` must stay defined as it i used as a default value for some of the servers (master, infra, app node). The rest of the roles (etcd, load balancer, dns) have their defaults hard-coded. -**Note**: If the `ephemeral_volumes` is set to `true`, the `*_volume_size` variables +**Note**: If the `openshift_openstack_ephemeral_volumes` is set to `true`, the `*_volume_size` variables will be ignored and the deployment will not create any cinder volumes. -The `openstack_flat_secgrp`, controls Neutron security groups creation for Heat +The `openshift_openstack_flat_secgrp`, controls Neutron security groups creation for Heat stacks. Set it to true, if you experience issues with sec group rules quotas. It trades security for number of rules, by sharing the same set of firewall rules for master, node, etcd and infra nodes. -The `required_packages` variable also provides a list of the additional +The `openshift_openstack_required_packages` variable also provides a list of the additional prerequisite packages to be installed before to deploy an OpenShift cluster. Those are ignored though, if the `manage_packages: False`. @@ -358,11 +358,11 @@ floating IP addresses to each node. If you have a provider network set up, this is all unnecessary as you can just access servers that are placed in the provider network directly. -To use a provider network, set its name in `openstack_provider_network_name` in +To use a provider network, set its name in `openshift_openstack_provider_network_name` in `inventory/group_vars/all.yml`. -If you set the provider network name, the `openstack_external_network_name` and -`openstack_private_network_name` fields will be ignored. 
+If you set the provider network name, the `openshift_openstack_external_network_name` and +`openshift_openstack_private_network_name` fields will be ignored. **NOTE**: this will not update the nodes' DNS, so running openshift-ansible right after provisioning will fail (unless you're using an external DNS server @@ -373,7 +373,7 @@ resolve each other by name. Configure required `*_ingress_cidr` variables to restrict public access to provisioned servers from your laptop (a /32 notation should be used) -or your trusted network. The most important is the `node_ingress_cidr` +or your trusted network. The most important is the `openshift_openstack_node_ingress_cidr` that restricts public access to the deployed DNS server and cluster nodes' ephemeral ports range. @@ -388,7 +388,7 @@ implications though, and is not recommended for production deployments. ### DNS servers security options -Aside from `node_ingress_cidr` restricting public access to in-stack DNS +Aside from `openshift_openstack_node_ingress_cidr` restricting public access to in-stack DNS servers, there are following (bind/named specific) DNS security options available: @@ -435,8 +435,8 @@ it up as the OpenShift hosted registry. To do that you need specify the desired Cinder volume name and size in Gigabytes in `inventory/group_vars/all.yml`: - cinder_hosted_registry_name: cinder-registry - cinder_hosted_registry_size_gb: 10 + openshift_openstack_cinder_hosted_registry_name: cinder-registry + openshift_openstack_cinder_hosted_registry_size_gb: 10 With this, the playbooks will create the volume and set up its filesystem. If there is an existing volume of the same name, we will @@ -483,8 +483,8 @@ the volume. If you're using the dynamic inventory, you must uncomment these two values as well: - #openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}" - #openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi" + #openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', openshift_openstack_cinder_hosted_registry_name).id }}" + #openshift_hosted_registry_storage_volume_size: "{{ openshift_openstack_cinder_hosted_registry_size_gb }}Gi" But note that they use the `os_cinder` lookup plugin we provide, so you must tell Ansible where to find it either in `ansible.cfg` (the one we provide is @@ -528,7 +528,7 @@ the **UUID** of the Cinder volume, *not its name*. We can do formate the volume for you if you ask for it in `inventory/group_vars/all.yml`: - prepare_and_format_registry_volume: true + openshift_openstack_prepare_and_format_registry_volume: true **NOTE:** doing so **will destroy any data that's currently on the volume**! @@ -544,16 +544,16 @@ You can also run the registry setup playbook directly: Example inventory variables: - openstack_use_bastion: true - bastion_ingress_cidr: "{{openstack_subnet_prefix}}.0/24" + openshift_openstack_use_bastion: true + openshift_openstack_bastion_ingress_cidr: "{{openshift_openstack_subnet_prefix}}.0/24" openstack_private_ssh_key: ~/.ssh/id_rsa openstack_inventory: static openstack_inventory_path: ../../../../inventory openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com -The `openstack_subnet_prefix` is the openstack private network for your cluster. -And the `bastion_ingress_cidr` defines accepted range for SSH connections to nodes -additionally to the `ssh_ingress_cidr`` (see the security notes above). 
+The `openshift_openstack_subnet_prefix` is the openstack private network for your cluster. +And the `openshift_openstack_bastion_ingress_cidr` defines accepted range for SSH connections to nodes +additionally to the `openshift_openstack_ssh_ingress_cidr`` (see the security notes above). The SSH config will be stored on the ansible control node by the gitven path. Ansible uses it automatically. To access the cluster nodes with @@ -738,7 +738,7 @@ OpenShift UI may be accessed via the 1st master node FQDN, port 8443. When using a bastion, you may want to make an SSH tunnel from your control node to access UI on the `https://localhost:8443`, with this inventory variable: - openshift_ui_ssh_tunnel: True + openshift_openstack_ui_ssh_tunnel: True Note, this requires sudo rights on the ansible control node and an absolute path for the `openstack_private_ssh_key`. You should also update the control node's @@ -769,4 +769,4 @@ Usage: ansible-playbook -i openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=] [-e openshift_ansible_dir=] ``` -Note: This playbook works only without a bastion node (`openstack_use_bastion: False`). +Note: This playbook works only without a bastion node (`openshift_openstack_use_bastion: False`). diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml index b1dff1870..fe3057158 100644 --- a/playbooks/openstack/openshift-cluster/provision.yml +++ b/playbooks/openstack/openshift-cluster/provision.yml @@ -42,8 +42,8 @@ name: openshift_openstack tasks_from: populate-dns.yml when: - - external_nsupdate_keys is defined - - external_nsupdate_keys.private is defined or external_nsupdate_keys.public is defined + - openshift_openstack_external_nsupdate_keys is defined + - openshift_openstack_external_nsupdate_keys.private is defined or openshift_openstack_external_nsupdate_keys.public is defined - name: Prepare the Nodes in the cluster for installation hosts: oo_all_hosts diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml index 7d8dc157e..1e55adb9e 100644 --- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml @@ -2,10 +2,10 @@ openshift_deployment_type: origin #openshift_deployment_type: openshift-enterprise #openshift_release: v3.5 -openshift_master_default_subdomain: "apps.{{ env_id }}.{{ public_dns_domain }}" +openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" openshift_master_cluster_method: native -openshift_master_cluster_hostname: "console.{{ env_id }}.{{ public_dns_domain }}" +openshift_master_cluster_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" openshift_master_cluster_public_hostname: "{{ openshift_master_cluster_hostname }}" osm_default_node_selector: 'region=primary' @@ -29,10 +29,10 @@ openshift_hosted_registry_wait: True ## NOTE(shadower): This won't work until the openshift-ansible issue #5657 is fixed: ## https://github.com/openshift/openshift-ansible/issues/5657 -## If you're using the `cinder_hosted_registry_name` option from +## If you're using the `openshift_openstack_cinder_hosted_registry_name` option from ## `all.yml`, uncomment these lines: -#openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}" 
-#openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi" +#openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', openshift_openstack_cinder_hosted_registry_name).id }}" +#openshift_hosted_registry_storage_volume_size: "{{ openshift_openstack_cinder_hosted_registry_size_gb }}Gi" ## If you're using a Cinder volume you've set up yourself, uncomment these lines: #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05 diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml index e0618d685..450642c81 100644 --- a/playbooks/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/openstack/sample-inventory/group_vars/all.yml @@ -1,59 +1,59 @@ --- -env_id: "openshift" -public_dns_domain: "example.com" -openstack_dns_nameservers: [] +openshift_openstack_clusterid: "openshift" +openshift_openstack_public_dns_domain: "example.com" +openshift_openstack_dns_nameservers: [] # # Used Hostnames # # - set custom hostnames for roles by uncommenting corresponding lines -#openstack_master_hostname: "master" -#openstack_infra_hostname: "infra-node" -#openstack_node_hostname: "app-node" -#openstack_lb_hostname: "lb" -#openstack_etcd_hostname: "etcd" -#openstack_dns_hostname: "dns" - -openstack_keypair_name: "openshift" -openstack_external_network_name: "public" -#openstack_private_network_name: "openshift-ansible-{{ stack_name }}-net" +#openshift_openstack_master_hostname: "master" +#openshift_openstack_infra_hostname: "infra-node" +#openshift_openstack_node_hostname: "app-node" +#openshift_openstack_lb_hostname: "lb" +#openshift_openstack_etcd_hostname: "etcd" +#openshift_openstack_dns_hostname: "dns" + +openshift_openstack_keypair_name: "openshift" +openshift_openstack_external_network_name: "public" +#openshift_openstack_private_network_name: "openshift-ansible-{{ openshift_openstack_stack_name }}-net" # # A dedicated Neutron network name for containers data network -# # Configures the data network to be separated from openstack_private_network_name +# # Configures the data network to be separated from openshift_openstack_private_network_name # # NOTE: this is only supported with Flannel SDN yet -#openstack_private_data_network_name: "openshift-ansible-{{ stack_name }}-data-net" +#openstack_private_data_network_name: "openshift-ansible-{{ openshift_openstack_stack_name }}-data-net" ## If you want to use a provider network, set its name here. -## NOTE: the `openstack_external_network_name` and -## `openstack_private_network_name` options will be ignored when using a +## NOTE: the `openshift_openstack_external_network_name` and +## `openshift_openstack_private_network_name` options will be ignored when using a ## provider network. 
-#openstack_provider_network_name: "provider" +#openshift_openstack_provider_network_name: "provider" # # Used Images # # - set specific images for roles by uncommenting corresponding lines -# # - note: do not remove openstack_default_image_name definition -#openstack_master_image_name: "centos7" -#openstack_infra_image_name: "centos7" -#openstack_node_image_name: "centos7" -#openstack_lb_image_name: "centos7" -#openstack_etcd_image_name: "centos7" -#openstack_dns_image_name: "centos7" -openstack_default_image_name: "centos7" - -openstack_num_masters: 1 -openstack_num_infra: 1 -openstack_num_nodes: 2 +# # - note: do not remove openshift_openstack_default_image_name definition +#openshift_openstack_master_image_name: "centos7" +#openshift_openstack_infra_image_name: "centos7" +#openshift_openstack_node_image_name: "centos7" +#openshift_openstack_lb_image_name: "centos7" +#openshift_openstack_etcd_image_name: "centos7" +#openshift_openstack_dns_image_name: "centos7" +openshift_openstack_default_image_name: "centos7" + +openshift_openstack_num_masters: 1 +openshift_openstack_num_infra: 1 +openshift_openstack_num_nodes: 2 # # Used Flavors # # - set specific flavors for roles by uncommenting corresponding lines -# # - note: do note remove openstack_default_flavor definition -#openstack_master_flavor: "m1.medium" -#openstack_infra_flavor: "m1.medium" -#openstack_node_flavor: "m1.medium" -#openstack_lb_flavor: "m1.medium" -#openstack_etcd_flavor: "m1.medium" -#openstack_dns_flavor: "m1.medium" -openstack_default_flavor: "m1.medium" +# # - note: do note remove openshift_openstack_default_flavor definition +#openshift_openstack_master_flavor: "m1.medium" +#openshift_openstack_infra_flavor: "m1.medium" +#openshift_openstack_node_flavor: "m1.medium" +#openshift_openstack_lb_flavor: "m1.medium" +#openshift_openstack_etcd_flavor: "m1.medium" +#openshift_openstack_dns_flavor: "m1.medium" +openshift_openstack_default_flavor: "m1.medium" # # Numerical index of nodes to remove -# openstack_nodes_to_remove: [] +# openshift_openstack_nodes_to_remove: [] # # Docker volume size # # - set specific volume size for roles by uncommenting corresponding lines @@ -69,22 +69,22 @@ docker_volume_size: "15" ## Specify server group policies for master and infra nodes. Nova must be configured to ## enable these policies. 'anti-affinity' will ensure that each VM is launched on a ## different physical host. -#openstack_master_server_group_policies: [anti-affinity] -#openstack_infra_server_group_policies: [anti-affinity] +#openshift_openstack_master_server_group_policies: [anti-affinity] +#openshift_openstack_infra_server_group_policies: [anti-affinity] ## Create a Cinder volume and use it for the OpenShift registry. ## NOTE: the openstack credentials and hosted registry options must be set in OSEv3.yml! -#cinder_hosted_registry_name: cinder-registry -#cinder_hosted_registry_size_gb: 10 +#openshift_openstack_cinder_hosted_registry_name: cinder-registry +#openshift_openstack_cinder_hosted_registry_size_gb: 10 ## Set up a filesystem on the cinder volume specified in `OSEv3.yaml`. ## You need to specify the file system and volume ID in OSEv3 via ## `openshift_hosted_registry_storage_openstack_filesystem` and ## `openshift_hosted_registry_storage_openstack_volumeID`. ## WARNING: This will delete any data on the volume! 
-#prepare_and_format_registry_volume: False +#openshift_openstack_prepare_and_format_registry_volume: False -openstack_subnet_prefix: "192.168.99" +openshift_openstack_subnet_prefix: "192.168.99" ## Red Hat subscription defaults to false which means we will not attempt to ## subscribe the nodes @@ -110,8 +110,8 @@ openstack_subnet_prefix: "192.168.99" # # Roll-your-own DNS -#openstack_num_dns: 0 -#external_nsupdate_keys: +#openshift_openstack_num_dns: 0 +#openshift_openstack_external_nsupdate_keys: # public: # key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg==' # key_algorithm: 'hmac-md5' @@ -131,16 +131,16 @@ openstack_subnet_prefix: "192.168.99" ansible_user: openshift # # Use a single security group for a cluster (default: false) -#openstack_flat_secgrp: false +#openshift_openstack_flat_secgrp: false # If you want to use the VM storage instead of Cinder volumes, set this to `true`. # NOTE: this is for testing only! Your data will be gone once the VM disappears! -# ephemeral_volumes: false +# openshift_openstack_ephemeral_volumes: false # # OpenShift node labels # # - in order to customise node labels for app and/or infra group, set the -# # openshift_cluster_node_labels variable -#openshift_cluster_node_labels: +# # openshift_openstack_cluster_node_labels variable +#openshift_openstack_cluster_node_labels: # app: # region: primary # infra: diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 1f9c09c96..3eca52963 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -1,28 +1,27 @@ --- -stack_state: 'present' +openshift_openstack_stack_state: 'present' -ssh_ingress_cidr: 0.0.0.0/0 -node_ingress_cidr: 0.0.0.0/0 -master_ingress_cidr: 0.0.0.0/0 -lb_ingress_cidr: 0.0.0.0/0 -bastion_ingress_cidr: 0.0.0.0/0 -openstack_num_etcd: 0 -openstack_num_masters: 1 -openstack_num_nodes: 1 -openstack_num_dns: 0 -openstack_num_infra: 1 -openstack_dns_nameservers: [] -openstack_nodes_to_remove: [] +openshift_openstack_ssh_ingress_cidr: 0.0.0.0/0 +openshift_openstack_node_ingress_cidr: 0.0.0.0/0 +openshift_openstack_lb_ingress_cidr: 0.0.0.0/0 +openshift_openstack_bastion_ingress_cidr: 0.0.0.0/0 +openshift_openstack_num_etcd: 0 +openshift_openstack_num_masters: 1 +openshift_openstack_num_nodes: 1 +openshift_openstack_num_dns: 0 +openshift_openstack_num_infra: 1 +openshift_openstack_dns_nameservers: [] +openshift_openstack_nodes_to_remove: [] -openshift_cluster_node_labels: +openshift_openstack_cluster_node_labels: app: region: primary infra: region: infra -install_debug_packages: false -required_packages: +openshift_openstack_install_debug_packages: false +openshift_openstack_required_packages: - docker - NetworkManager - wget @@ -30,66 +29,69 @@ required_packages: - net-tools - bind-utils - bridge-utils -debug_packages: +openshift_openstack_debug_packages: - bash-completion - vim-enhanced # container-storage-setup -docker_dev: "/dev/sdb" -docker_vg: "docker-vol" -docker_data_size: "95%VG" -docker_dm_basesize: "3G" -container_root_lv_name: "dockerlv" -container_root_lv_mount_path: "/var/lib/docker" +openshift_openstack_container_storage_setup: + docker_dev: "/dev/sdb" + docker_vg: "docker-vol" + docker_data_size: "95%VG" + docker_dm_basesize: "3G" + container_root_lv_name: "dockerlv" + container_root_lv_mount_path: "/var/lib/docker" # populate-dns -dns_records_rm: [] -dns_records_add: [] -external_nsupdate_keys: {} 
+openshift_openstack_dns_records_add: [] +openshift_openstack_external_nsupdate_keys: {} -full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' + public_dns_domain) }}" -openshift_app_domain: "apps" +openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' + openshift_openstack_public_dns_domain) }}" +openshift_openstack_app_subdomain: "apps" # heat vars -stack_name: "{{ env_id }}.{{ public_dns_domain }}" -openstack_subnet_prefix: "192.168.99" -openstack_master_hostname: master -openstack_infra_hostname: infra-node -openstack_node_hostname: app-node -openstack_lb_hostname: lb -openstack_etcd_hostname: etcd -openstack_dns_hostname: dns -openstack_keypair_name: openshift -openstack_lb_flavor: "{{ openstack_default_flavor }}" -openstack_etcd_flavor: "{{ openstack_default_flavor }}" -openstack_master_flavor: "{{ openstack_default_flavor }}" -openstack_node_flavor: "{{ openstack_default_flavor }}" -openstack_infra_flavor: "{{ openstack_default_flavor }}" -openstack_dns_flavor: "{{ openstack_default_flavor }}" -openstack_master_image: "{{ openstack_default_image_name }}" -openstack_infra_image: "{{ openstack_default_image_name }}" -openstack_node_image: "{{ openstack_default_image_name }}" -openstack_lb_image: "{{ openstack_default_image_name }}" -openstack_etcd_image: "{{ openstack_default_image_name }}" -openstack_dns_image: "{{ openstack_default_image_name }}" -openstack_provider_network_name: null -openstack_external_network_name: null -openstack_private_network: >- - {% if openstack_provider_network_name | default(None) -%} - {{ openstack_provider_network_name }} +openshift_openstack_clusterid: openshift +openshift_openstack_stack_name: "{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" +openshift_openstack_subnet_prefix: "192.168.99" +openshift_openstack_master_hostname: master +openshift_openstack_infra_hostname: infra-node +openshift_openstack_node_hostname: app-node +openshift_openstack_lb_hostname: lb +openshift_openstack_etcd_hostname: etcd +openshift_openstack_dns_hostname: dns +openshift_openstack_keypair_name: openshift +openshift_openstack_lb_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_etcd_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_master_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_node_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_infra_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_dns_flavor: "{{ openshift_openstack_default_flavor }}" +openshift_openstack_master_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_infra_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_node_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_lb_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_etcd_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_dns_image: "{{ openshift_openstack_default_image_name }}" +openshift_openstack_provider_network_name: null +openshift_openstack_external_network_name: null +openshift_openstack_private_network: >- + {% if openshift_openstack_provider_network_name | default(None) -%} + {{ openshift_openstack_provider_network_name }} {%- else -%} - {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }} + {{ 
openshift_openstack_private_network_name | default ('openshift-ansible-' + openshift_openstack_stack_name + '-net') }} {%- endif -%} -openstack_master_server_group_policies: [] -openstack_infra_server_group_policies: [] -openstack_master_volume_size: "{{ docker_volume_size }}" -openstack_infra_volume_size: "{{ docker_volume_size }}" -openstack_node_volume_size: "{{ docker_volume_size }}" -openstack_etcd_volume_size: 2 -openstack_dns_volume_size: 1 -openstack_lb_volume_size: 5 -openstack_use_bastion: false -openshift_ui_ssh_tunnel: false +openshift_openstack_master_server_group_policies: [] +openshift_openstack_infra_server_group_policies: [] +openshift_openstack_docker_volume_size: 15 +openshift_openstack_master_volume_size: "{{ openshift_openstack_docker_volume_size }}" +openshift_openstack_infra_volume_size: "{{ openshift_openstack_docker_volume_size }}" +openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size }}" +openshift_openstack_etcd_volume_size: 2 +openshift_openstack_dns_volume_size: 1 +openshift_openstack_lb_volume_size: 5 +openshift_openstack_use_bastion: false +openshift_openstack_ui_ssh_tunnel: false +openshift_openstack_ephemeral_volumes: false diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml index a91e60640..57c7238d1 100644 --- a/roles/openshift_openstack/tasks/check-prerequisites.yml +++ b/roles/openshift_openstack/tasks/check-prerequisites.yml @@ -50,24 +50,24 @@ # Check Glance image - name: Try to get image facts os_image_facts: - image: "{{ openstack_default_image_name }}" + image: "{{ openshift_openstack_default_image_name }}" register: image_result - name: Check that image is available assert: that: "image_result.ansible_facts.openstack_image" - msg: "Image {{ openstack_default_image_name }} is not available" + msg: "Image {{ openshift_openstack_default_image_name }} is not available" # Check network name - name: Try to get network facts os_networks_facts: - name: "{{ openstack_external_network_name }}" + name: "{{ openshift_openstack_external_network_name }}" register: network_result - when: not openstack_provider_network_name|default(None) + when: not openshift_openstack_provider_network_name|default(None) - name: Check that network is available assert: that: "network_result.ansible_facts.openstack_networks" - msg: "Network {{ openstack_external_network_name }} is not available" - when: not openstack_provider_network_name|default(None) + msg: "Network {{ openshift_openstack_external_network_name }} is not available" + when: not openshift_openstack_provider_network_name|default(None) # Check keypair # TODO kpilatov: there is no Ansible module for getting OS keypairs @@ -76,30 +76,30 @@ - name: Try to show keypair command: > python -c 'import shade; cloud = shade.openstack_cloud(); - exit(cloud.get_keypair("{{ openstack_keypair_name }}") is None)' + exit(cloud.get_keypair("{{ openshift_openstack_keypair_name }}") is None)' ignore_errors: yes register: key_result - name: Check that keypair is available assert: that: 'key_result.rc == 0' - msg: "Keypair {{ openstack_keypair_name }} is not available" + msg: "Keypair {{ openshift_openstack_keypair_name }} is not available" # Check that custom images are available - include: custom_image_check.yaml with_items: - - "{{ openstack_master_image }}" - - "{{ openstack_infra_image }}" - - "{{ openstack_node_image }}" - - "{{ openstack_lb_image }}" - - "{{ openstack_etcd_image }}" - - "{{ openstack_dns_image }}" + - "{{ 
openshift_openstack_master_image }}" + - "{{ openshift_openstack_infra_image }}" + - "{{ openshift_openstack_node_image }}" + - "{{ openshift_openstack_lb_image }}" + - "{{ openshift_openstack_etcd_image }}" + - "{{ openshift_openstack_dns_image }}" # Check that custom flavors are available - include: custom_flavor_check.yaml with_items: - - "{{ openstack_master_flavor }}" - - "{{ openstack_infra_flavor }}" - - "{{ openstack_node_flavor }}" - - "{{ openstack_lb_flavor }}" - - "{{ openstack_etcd_flavor }}" - - "{{ openstack_dns_flavor }}" + - "{{ openshift_openstack_master_flavor }}" + - "{{ openshift_openstack_infra_flavor }}" + - "{{ openshift_openstack_node_flavor }}" + - "{{ openshift_openstack_lb_flavor }}" + - "{{ openshift_openstack_etcd_flavor }}" + - "{{ openshift_openstack_dns_flavor }}" diff --git a/roles/openshift_openstack/tasks/hostname.yml b/roles/openshift_openstack/tasks/hostname.yml index 9815d0e80..e1a18425f 100644 --- a/roles/openshift_openstack/tasks/hostname.yml +++ b/roles/openshift_openstack/tasks/hostname.yml @@ -5,7 +5,7 @@ - name: Setting FQDN Fact set_fact: - new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}" + new_fqdn: "{{ new_hostname }}.{{ openshift_openstack_full_dns_domain }}" - name: Setting hostname and DNS domain hostname: name="{{ new_fqdn }}" diff --git a/roles/openshift_openstack/tasks/net_vars_check.yaml b/roles/openshift_openstack/tasks/net_vars_check.yaml index 68afde415..18b9b21b9 100644 --- a/roles/openshift_openstack/tasks/net_vars_check.yaml +++ b/roles/openshift_openstack/tasks/net_vars_check.yaml @@ -3,7 +3,7 @@ fail: msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network" when: - - openstack_provider_network_name is defined + - openshift_openstack_provider_network_name is defined - openstack_private_data_network_name is defined - name: Check the flannel network configuration diff --git a/roles/openshift_openstack/tasks/node-packages.yml b/roles/openshift_openstack/tasks/node-packages.yml index c65eaec3b..7864f5269 100644 --- a/roles/openshift_openstack/tasks/node-packages.yml +++ b/roles/openshift_openstack/tasks/node-packages.yml @@ -5,11 +5,11 @@ yum: name: "{{ item }}" state: latest - with_items: "{{ required_packages }}" + with_items: "{{ openshift_openstack_required_packages }}" - name: Install debug packages (optional) yum: name: "{{ item }}" state: latest - with_items: "{{ debug_packages }}" - when: install_debug_packages|bool + with_items: "{{ openshift_openstack_debug_packages }}" + when: openshift_openstack_install_debug_packages|bool diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index 669b65a01..080c3aca9 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -5,41 +5,41 @@ - name: "Add wildcard records to the private A records for infrahosts" set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_app_domain, 'ip': hostvars[item]['private_v4'] } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_openstack_app_subdomain, 'ip': hostvars[item]['private_v4'] } ] }}" with_items: "{{ groups['infra_hosts'] }}" - name: "Add public master cluster hostname records to the private A records (single master)" set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].private_v4 } ] }}" when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 + - openshift_openstack_num_masters == 1 - name: "Add public master cluster hostname records to the private A records (multi-master)" set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].private_v4 } ] }}" when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters > 1 + - openshift_openstack_num_masters > 1 - name: "Set the private DNS server to use the external value (if provided)" set_fact: - nsupdate_server_private: "{{ external_nsupdate_keys['private']['server'] }}" - nsupdate_key_secret_private: "{{ external_nsupdate_keys['private']['key_secret'] }}" - nsupdate_key_algorithm_private: "{{ external_nsupdate_keys['private']['key_algorithm'] }}" - nsupdate_private_key_name: "{{ external_nsupdate_keys['private']['key_name']|default('private-' + full_dns_domain) }}" + nsupdate_server_private: "{{ openshift_openstack_external_nsupdate_keys['private']['server'] }}" + nsupdate_key_secret_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_secret'] }}" + nsupdate_key_algorithm_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_algorithm'] }}" + nsupdate_private_key_name: "{{ openshift_openstack_external_nsupdate_keys['private']['key_name']|default('private-' + openshift_openstack_full_dns_domain) }}" when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['private'] is defined + - openshift_openstack_external_nsupdate_keys is defined + - openshift_openstack_external_nsupdate_keys['private'] is defined - name: "Generate the private Add section for DNS" set_fact: private_named_records: - view: "private" - zone: "{{ full_dns_domain }}" + zone: "{{ openshift_openstack_full_dns_domain }}" server: "{{ nsupdate_server_private }}" - key_name: "{{ nsupdate_private_key_name|default('private-' + full_dns_domain) }}" + key_name: "{{ nsupdate_private_key_name|default('private-' + openshift_openstack_full_dns_domain) }}" key_secret: "{{ nsupdate_key_secret_private }}" key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" entries: "{{ private_records }}" @@ -52,58 +52,58 @@ - name: "Add wildcard records to the public A records" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' 
+ openshift_app_domain, 'ip': hostvars[item]['public_v4'] } ] }}" + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': '*.' + openshift_openstack_app_subdomain, 'ip': hostvars[item]['public_v4'] } ] }}" with_items: "{{ groups['infra_hosts'] }}" when: hostvars[item]['public_v4'] is defined - name: "Add public master cluster hostname records to the public A records (single master)" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.masters[0]].public_v4 } ] }}" when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 - - not openstack_use_bastion|bool + - openshift_openstack_num_masters == 1 + - not openshift_openstack_use_bastion|bool - name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters == 1 - - openstack_use_bastion|bool + - openshift_openstack_num_masters == 1 + - openshift_openstack_use_bastion|bool - name: "Add public master cluster hostname records to the public A records (multi-master)" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.lb[0]].public_v4 } ] }}" when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openstack_num_masters > 1 + - openshift_openstack_num_masters > 1 - name: "Set the public DNS server details to use the external value (if provided)" set_fact: - nsupdate_server_public: "{{ external_nsupdate_keys['public']['server'] }}" - nsupdate_key_secret_public: "{{ external_nsupdate_keys['public']['key_secret'] }}" - nsupdate_key_algorithm_public: "{{ external_nsupdate_keys['public']['key_algorithm'] }}" - nsupdate_public_key_name: "{{ external_nsupdate_keys['public']['key_name']|default('public-' + full_dns_domain) }}" + nsupdate_server_public: "{{ openshift_openstack_external_nsupdate_keys['public']['server'] }}" + nsupdate_key_secret_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_secret'] }}" + nsupdate_key_algorithm_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] 
}}" + nsupdate_public_key_name: "{{ openshift_openstack_external_nsupdate_keys['public']['key_name']|default('public-' + openshift_openstack_full_dns_domain) }}" when: - - external_nsupdate_keys is defined - - external_nsupdate_keys['public'] is defined + - openshift_openstack_external_nsupdate_keys is defined + - openshift_openstack_external_nsupdate_keys['public'] is defined - name: "Generate the public Add section for DNS" set_fact: public_named_records: - view: "public" - zone: "{{ full_dns_domain }}" + zone: "{{ openshift_openstack_full_dns_domain }}" server: "{{ nsupdate_server_public }}" - key_name: "{{ nsupdate_public_key_name|default('public-' + full_dns_domain) }}" + key_name: "{{ nsupdate_public_key_name|default('public-' + openshift_openstack_full_dns_domain) }}" key_secret: "{{ nsupdate_key_secret_public }}" key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" entries: "{{ public_records }}" -- name: "Generate the final dns_records_add" +- name: "Generate the final openshift_openstack_dns_records_add" set_fact: - dns_records_add: "{{ private_named_records + public_named_records }}" + openshift_openstack_dns_records_add: "{{ private_named_records + public_named_records }}" - name: "Add DNS A records" @@ -119,7 +119,7 @@ # TODO(shadower): add a cleanup playbook that removes these records, too! state: present with_subelements: - - "{{ dns_records_add | default({}) }}" + - "{{ openshift_openstack_dns_records_add | default({}) }}" - entries register: nsupdate_add_result until: nsupdate_add_result|succeeded diff --git a/roles/openshift_openstack/tasks/provision.yml b/roles/openshift_openstack/tasks/provision.yml index e693f535a..dccbe334c 100644 --- a/roles/openshift_openstack/tasks/provision.yml +++ b/roles/openshift_openstack/tasks/provision.yml @@ -2,14 +2,14 @@ - name: Generate the templates include: generate-templates.yml when: - - stack_state == 'present' + - openshift_openstack_stack_state == 'present' - name: Handle the Stack (create/delete) ignore_errors: False register: stack_create os_stack: - name: "{{ stack_name }}" - state: "{{ stack_state }}" + name: "{{ openshift_openstack_stack_name }}" + state: "{{ openshift_openstack_stack_state }}" template: "{{ stack_template_path | default(omit) }}" wait: yes @@ -19,7 +19,7 @@ - name: CleanUp include: cleanup.yml when: - - stack_state == 'present' + - openshift_openstack_stack_state == 'present' # TODO(shadower): create the registry and PV Cinder volumes if specified # and include the `prepare-and-format-cinder-volume` tasks to set it up diff --git a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 index b5869feff..32c6b5838 100644 --- a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 +++ b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 @@ -1,4 +1,4 @@ -DEVS="{{ docker_dev }}" -VG="{{ docker_vg }}" -DATA_SIZE="{{ docker_data_size }}" -EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}" +DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}" +VG="{{ openshift_openstack_container_storage_setup.docker_vg }}" +DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}" +EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ openshift_openstack_container_storage_setup.docker_dm_basesize }}" diff --git a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 index 
d8b4a0276..1bf366bdc 100644 --- a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 +++ b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 @@ -1,7 +1,7 @@ -DEVS="{{ docker_dev }}" -VG="{{ docker_vg }}" -DATA_SIZE="{{ docker_data_size }}" +DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}" +VG="{{ openshift_openstack_container_storage_setup.docker_vg }}" +DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}" STORAGE_DRIVER=overlay2 -CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}" -CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}" +CONTAINER_ROOT_LV_NAME="{{ openshift_openstack_container_storage_setup.container_root_lv_name }}" +CONTAINER_ROOT_LV_MOUNT_PATH="{{ openshift_openstack_container_storage_setup.container_root_lv_mount_path }}" CONTAINER_ROOT_LV_SIZE=100%FREE diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 index 28634f9a4..bfa65b460 100644 --- a/roles/openshift_openstack/templates/heat_stack.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -54,7 +54,7 @@ outputs: description: Floating IPs of the nodes value: { get_attr: [ infra_nodes, floating_ip ] } -{% if openstack_num_dns|int > 0 %} +{% if openshift_openstack_num_dns|int > 0 %} dns_name: description: Name of the DNS value: @@ -72,11 +72,11 @@ outputs: {% endif %} conditions: - no_floating: {% if openstack_provider_network_name or openstack_use_bastion|bool %}true{% else %}false{% endif %} + no_floating: {% if openshift_openstack_provider_network_name or openshift_openstack_use_bastion|bool %}true{% else %}false{% endif %} resources: -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} net: type: OS::Neutron::Net properties: @@ -84,7 +84,7 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} subnet: type: OS::Neutron::Subnet @@ -93,26 +93,26 @@ resources: str_replace: template: openshift-ansible-cluster_id-subnet params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} network: { get_resource: net } cidr: str_replace: template: subnet_24_prefix.0/24 params: - subnet_24_prefix: {{ openstack_subnet_prefix }} + subnet_24_prefix: {{ openshift_openstack_subnet_prefix }} allocation_pools: - start: str_replace: template: subnet_24_prefix.3 params: - subnet_24_prefix: {{ openstack_subnet_prefix }} + subnet_24_prefix: {{ openshift_openstack_subnet_prefix }} end: str_replace: template: subnet_24_prefix.254 params: - subnet_24_prefix: {{ openstack_subnet_prefix }} + subnet_24_prefix: {{ openshift_openstack_subnet_prefix }} dns_nameservers: -{% for nameserver in openstack_dns_nameservers %} +{% for nameserver in openshift_openstack_dns_nameservers %} - {{ nameserver }} {% endfor %} @@ -120,13 +120,13 @@ resources: data_net: type: OS::Neutron::Net properties: - name: openshift-ansible-{{ stack_name }}-data-net + name: openshift-ansible-{{ openshift_openstack_stack_name }}-data-net port_security_enabled: false data_subnet: type: OS::Neutron::Subnet properties: - name: openshift-ansible-{{ stack_name }}-data-subnet + name: openshift-ansible-{{ openshift_openstack_stack_name }}-data-subnet network: { get_resource: data_net } cidr: {{ osm_cluster_network_cidr|default('10.128.0.0/14') }} gateway_ip: null @@ -139,9 +139,9 @@ resources: str_replace: template: 
openshift-ansible-cluster_id-router params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} external_gateway_info: - network: {{ openstack_external_network_name }} + network: {{ openshift_openstack_external_network_name }} interface: type: OS::Neutron::RouterInterface @@ -158,8 +158,8 @@ resources: # str_replace: # template: openshift-ansible-cluster_id-keypair # params: -# cluster_id: {{ stack_name }} -# public_key: {{ openstack_keypair_name }} +# cluster_id: {{ openshift_openstack_stack_name }} +# public_key: {{ openshift_openstack_keypair_name }} common-secgrp: type: OS::Neutron::SecurityGroup @@ -168,30 +168,30 @@ resources: str_replace: template: openshift-ansible-cluster_id-common-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Basic ssh/icmp security group for cluster_id OpenShift cluster params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp port_range_min: 22 port_range_max: 22 - remote_ip_prefix: {{ ssh_ingress_cidr }} -{% if openstack_use_bastion|bool %} + remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} +{% if openshift_openstack_use_bastion|bool %} - direction: ingress protocol: tcp port_range_min: 22 port_range_max: 22 - remote_ip_prefix: {{ bastion_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_bastion_ingress_cidr }} {% endif %} - direction: ingress protocol: icmp - remote_ip_prefix: {{ ssh_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} -{% if openstack_flat_secgrp|default(False)|bool %} +{% if openshift_openstack_flat_secgrp|default(False)|bool %} flat-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -199,12 +199,12 @@ resources: str_replace: template: openshift-ansible-cluster_id-flat-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id OpenShift cluster params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp @@ -280,12 +280,12 @@ resources: protocol: tcp port_range_min: 30000 port_range_max: 32767 - remote_ip_prefix: {{ node_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 30000 port_range_max: 32767 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" {% else %} master-secgrp: type: OS::Neutron::SecurityGroup @@ -294,12 +294,12 @@ resources: str_replace: template: openshift-ansible-cluster_id-master-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id OpenShift cluster master params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp @@ -355,12 +355,12 @@ resources: str_replace: template: openshift-ansible-cluster_id-etcd-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id etcd cluster params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp @@ -381,12 +381,12 @@ resources: str_replace: template: 
openshift-ansible-cluster_id-node-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id OpenShift cluster nodes params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp @@ -412,12 +412,12 @@ resources: protocol: tcp port_range_min: 30000 port_range_max: 32767 - remote_ip_prefix: {{ node_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 30000 port_range_max: 32767 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" {% endif %} infra-secgrp: @@ -427,12 +427,12 @@ resources: str_replace: template: openshift-ansible-cluster_id-infra-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id OpenShift infrastructure cluster nodes params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: tcp @@ -443,7 +443,7 @@ resources: port_range_min: 443 port_range_max: 443 -{% if openstack_num_dns|int > 0 %} +{% if openshift_openstack_num_dns|int > 0 %} dns-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -451,67 +451,67 @@ resources: str_replace: template: openshift-ansible-cluster_id-dns-secgrp params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} description: str_replace: template: Security group for cluster_id cluster DNS params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} rules: - direction: ingress protocol: udp port_range_min: 53 port_range_max: 53 - remote_ip_prefix: {{ node_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} - direction: ingress protocol: udp port_range_min: 53 port_range_max: 53 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" - direction: ingress protocol: tcp port_range_min: 53 port_range_max: 53 - remote_ip_prefix: {{ node_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} - direction: ingress protocol: tcp port_range_min: 53 port_range_max: 53 - remote_ip_prefix: "{{ openstack_subnet_prefix }}.0/24" + remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" {% endif %} -{% if openstack_num_masters|int > 1 or openshift_ui_ssh_tunnel|bool %} +{% if openshift_openstack_num_masters|int > 1 or openshift_openstack_ui_ssh_tunnel|bool %} lb-secgrp: type: OS::Neutron::SecurityGroup properties: - name: openshift-ansible-{{ stack_name }}-lb-secgrp - description: Security group for {{ stack_name }} cluster Load Balancer + name: openshift-ansible-{{ openshift_openstack_stack_name }}-lb-secgrp + description: Security group for {{ openshift_openstack_stack_name }} cluster Load Balancer rules: - direction: ingress protocol: tcp port_range_min: {{ openshift_master_api_port | default(8443) }} port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} -{% if openshift_ui_ssh_tunnel|bool %} + remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }} +{% if openshift_openstack_ui_ssh_tunnel|bool %} - direction: ingress protocol: tcp port_range_min: 
{{ openshift_master_api_port | default(8443) }} port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ ssh_ingress_cidr }} + remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} {% endif %} {% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} - direction: ingress protocol: tcp port_range_min: {{ openshift_master_console_port | default(8443) }} port_range_max: {{ openshift_master_console_port | default(8443) }} - remote_ip_prefix: {{ lb_ingress_cidr | default(bastion_ingress_cidr) }} + remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }} {% endif %} {% endif %} etcd: type: OS::Heat::ResourceGroup properties: - count: {{ openstack_num_etcd }} + count: {{ openshift_openstack_num_etcd }} resource_def: type: server.yaml properties: @@ -519,23 +519,23 @@ resources: str_replace: template: k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - k8s_type: {{ openstack_etcd_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_etcd_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: etcds - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: etcd - image: {{ openstack_etcd_image }} - flavor: {{ openstack_etcd_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name }} + image: {{ openshift_openstack_etcd_image }} + flavor: {{ openshift_openstack_etcd_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -543,40 +543,40 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% endif %} secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } + - { get_resource: {% if openshift_openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}etcd-secgrp{% endif %} } - { get_resource: common-secgrp } floating_network: if: - no_floating - null - - {{ openstack_external_network_name }} -{% if openstack_use_bastion|bool or openstack_provider_network_name %} + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ openstack_etcd_volume_size }} -{% if not openstack_provider_network_name %} + volume_size: {{ openshift_openstack_etcd_volume_size }} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} -{% if openstack_master_server_group_policies|length > 0 %} +{% if openshift_openstack_master_server_group_policies|length > 0 %} master_server_group: type: OS::Nova::ServerGroup properties: name: master_server_group - policies: {{ openstack_master_server_group_policies }} + policies: {{ openshift_openstack_master_server_group_policies }} {% endif %} 
-{% if openstack_infra_server_group_policies|length > 0 %} +{% if openshift_openstack_infra_server_group_policies|length > 0 %} infra_server_group: type: OS::Nova::ServerGroup properties: name: infra_server_group - policies: {{ openstack_infra_server_group_policies }} + policies: {{ openshift_openstack_infra_server_group_policies }} {% endif %} -{% if openstack_num_masters|int > 1 %} +{% if openshift_openstack_num_masters|int > 1 %} loadbalancer: type: OS::Heat::ResourceGroup properties: @@ -588,23 +588,23 @@ resources: str_replace: template: k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - k8s_type: {{ openstack_lb_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_lb_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: lb - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: lb - image: {{ openstack_lb_image }} - flavor: {{ openstack_lb_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name }} + image: {{ openshift_openstack_lb_image }} + flavor: {{ openshift_openstack_lb_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -612,16 +612,16 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% endif %} secgrp: - { get_resource: lb-secgrp } - { get_resource: common-secgrp } -{% if not openstack_provider_network_name %} - floating_network: {{ openstack_external_network_name }} +{% if not openshift_openstack_provider_network_name %} + floating_network: {{ openshift_openstack_external_network_name }} {% endif %} - volume_size: {{ openstack_lb_volume_size }} -{% if not openstack_provider_network_name %} + volume_size: {{ openshift_openstack_lb_volume_size }} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -630,7 +630,7 @@ resources: masters: type: OS::Heat::ResourceGroup properties: - count: {{ openstack_num_masters }} + count: {{ openshift_openstack_num_masters }} resource_def: type: server.yaml properties: @@ -638,23 +638,23 @@ resources: str_replace: template: k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - k8s_type: {{ openstack_master_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_master_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: masters - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: master - image: {{ openstack_master_image }} - flavor: {{ openstack_master_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name 
}} + image: {{ openshift_openstack_master_image }} + flavor: {{ openshift_openstack_master_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -662,7 +662,7 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% if openshift_use_flannel|default(False)|bool %} attach_data_net: true data_net: { get_resource: data_net } @@ -670,12 +670,12 @@ resources: {% endif %} {% endif %} secgrp: -{% if openstack_flat_secgrp|default(False)|bool %} +{% if openshift_openstack_flat_secgrp|default(False)|bool %} - { get_resource: flat-secgrp } {% else %} - { get_resource: master-secgrp } - { get_resource: node-secgrp } -{% if openstack_num_etcd|int == 0 %} +{% if openshift_openstack_num_etcd|int == 0 %} - { get_resource: etcd-secgrp } {% endif %} {% endif %} @@ -684,16 +684,16 @@ resources: if: - no_floating - null - - {{ openstack_external_network_name }} -{% if openstack_use_bastion|bool or openstack_provider_network_name %} + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ openstack_master_volume_size }} -{% if openstack_master_server_group_policies|length > 0 %} + volume_size: {{ openshift_openstack_master_volume_size }} +{% if openshift_openstack_master_server_group_policies|length > 0 %} scheduler_hints: group: { get_resource: master_server_group } {% endif %} -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -701,9 +701,9 @@ resources: compute_nodes: type: OS::Heat::ResourceGroup properties: - count: {{ openstack_num_nodes }} + count: {{ openshift_openstack_num_nodes }} removal_policies: - - resource_list: {{ openstack_nodes_to_remove }} + - resource_list: {{ openshift_openstack_nodes_to_remove }} resource_def: type: server.yaml properties: @@ -711,28 +711,28 @@ resources: str_replace: template: sub_type_k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ openstack_node_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + sub_type_k8s_type: {{ openshift_openstack_node_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: nodes - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: node subtype: app node_labels: -{% for k, v in openshift_cluster_node_labels.app.iteritems() %} +{% for k, v in openshift_openstack_cluster_node_labels.app.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_node_image }} - flavor: {{ openstack_node_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name }} + image: {{ openshift_openstack_node_image }} + flavor: {{ openshift_openstack_node_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ 
openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -740,7 +740,7 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% if openshift_use_flannel|default(False)|bool %} attach_data_net: true data_net: { get_resource: data_net } @@ -748,18 +748,18 @@ resources: {% endif %} {% endif %} secgrp: - - { get_resource: {% if openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } + - { get_resource: {% if openshift_openstack_flat_secgrp|default(False)|bool %}flat-secgrp{% else %}node-secgrp{% endif %} } - { get_resource: common-secgrp } floating_network: if: - no_floating - null - - {{ openstack_external_network_name }} -{% if openstack_use_bastion|bool or openstack_provider_network_name %} + - {{ openshift_openstack_external_network_name }} +{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} attach_float_net: false {% endif %} - volume_size: {{ openstack_node_volume_size }} -{% if not openstack_provider_network_name %} + volume_size: {{ openshift_openstack_node_volume_size }} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} @@ -767,7 +767,7 @@ resources: infra_nodes: type: OS::Heat::ResourceGroup properties: - count: {{ openstack_num_infra }} + count: {{ openshift_openstack_num_infra }} resource_def: type: server.yaml properties: @@ -775,28 +775,28 @@ resources: str_replace: template: sub_type_k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - sub_type_k8s_type: {{ openstack_infra_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + sub_type_k8s_type: {{ openshift_openstack_infra_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: infra - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: node subtype: infra node_labels: -{% for k, v in openshift_cluster_node_labels.infra.iteritems() %} +{% for k, v in openshift_openstack_cluster_node_labels.infra.iteritems() %} {{ k|e }}: {{ v|e }} {% endfor %} - image: {{ openstack_infra_image }} - flavor: {{ openstack_infra_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name }} + image: {{ openshift_openstack_infra_image }} + flavor: {{ openshift_openstack_infra_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -804,7 +804,7 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% if openshift_use_flannel|default(False)|bool %} attach_data_net: true data_net: { get_resource: data_net } @@ -813,34 +813,34 @@ resources: {% endif %} secgrp: # TODO(bogdando) filter only required node rules into infra-secgrp -{% if openstack_flat_secgrp|default(False)|bool %} +{% if 
openshift_openstack_flat_secgrp|default(False)|bool %} - { get_resource: flat-secgrp } {% else %} - { get_resource: node-secgrp } {% endif %} -{% if openshift_ui_ssh_tunnel|bool and openstack_num_masters|int < 2 %} +{% if openshift_openstack_ui_ssh_tunnel|bool and openshift_openstack_num_masters|int < 2 %} - { get_resource: lb-secgrp } {% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } -{% if not openstack_provider_network_name %} - floating_network: {{ openstack_external_network_name }} +{% if not openshift_openstack_provider_network_name %} + floating_network: {{ openshift_openstack_external_network_name }} {% endif %} - volume_size: {{ openstack_infra_volume_size }} -{% if openstack_infra_server_group_policies|length > 0 %} + volume_size: {{ openshift_openstack_infra_volume_size }} +{% if openshift_openstack_infra_server_group_policies|length > 0 %} scheduler_hints: group: { get_resource: infra_server_group } {% endif %} -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} -{% if openstack_num_dns|int > 0 %} +{% if openshift_openstack_num_dns|int > 0 %} dns: type: OS::Heat::ResourceGroup properties: - count: {{ openstack_num_dns }} + count: {{ openshift_openstack_num_dns }} resource_def: type: server.yaml properties: @@ -848,23 +848,23 @@ resources: str_replace: template: k8s_type-%index%.cluster_id params: - cluster_id: {{ stack_name }} - k8s_type: {{ openstack_dns_hostname }} - cluster_env: {{ public_dns_domain }} - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} + k8s_type: {{ openshift_openstack_dns_hostname }} + cluster_env: {{ openshift_openstack_public_dns_domain }} + cluster_id: {{ openshift_openstack_stack_name }} group: str_replace: template: k8s_type.cluster_id params: k8s_type: dns - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} type: dns - image: {{ openstack_dns_image }} - flavor: {{ openstack_dns_flavor }} - key_name: {{ openstack_keypair_name }} -{% if openstack_provider_network_name %} - net: {{ openstack_provider_network_name }} - net_name: {{ openstack_provider_network_name }} + image: {{ openshift_openstack_dns_image }} + flavor: {{ openshift_openstack_dns_flavor }} + key_name: {{ openshift_openstack_keypair_name }} +{% if openshift_openstack_provider_network_name %} + net: {{ openshift_openstack_provider_network_name }} + net_name: {{ openshift_openstack_provider_network_name }} {% else %} net: { get_resource: net } subnet: { get_resource: subnet } @@ -872,16 +872,16 @@ resources: str_replace: template: openshift-ansible-cluster_id-net params: - cluster_id: {{ stack_name }} + cluster_id: {{ openshift_openstack_stack_name }} {% endif %} secgrp: - { get_resource: dns-secgrp } - { get_resource: common-secgrp } -{% if not openstack_provider_network_name %} - floating_network: {{ openstack_external_network_name }} +{% if not openshift_openstack_provider_network_name %} + floating_network: {{ openshift_openstack_external_network_name }} {% endif %} - volume_size: {{ openstack_dns_volume_size }} -{% if not openstack_provider_network_name %} + volume_size: {{ openshift_openstack_dns_volume_size }} +{% if not openshift_openstack_provider_network_name %} depends_on: - interface {% endif %} diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 index 160345baf..a829da34f 100644 --- 
a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 @@ -61,7 +61,7 @@ parameters: label: Net name description: Net name -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} subnet: type: string label: Subnet ID @@ -81,7 +81,7 @@ parameters: label: Net ID description: Net resource -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} data_subnet: type: string default: '' @@ -102,7 +102,7 @@ parameters: label: Attach-float-net description: A switch for floating network port connection -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} floating_network: type: string default: '' @@ -156,7 +156,7 @@ outputs: - server - addresses - { get_param: net_name } -{% if openstack_provider_network_name %} +{% if openshift_openstack_provider_network_name %} - 0 {% else %} - 1 @@ -226,7 +226,7 @@ resources: type: OS::Neutron::Port properties: network: { get_param: net } -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} fixed_ips: - subnet: { get_param: subnet } {% endif %} @@ -239,13 +239,13 @@ resources: properties: network: { get_param: data_net } port_security_enabled: false -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} fixed_ips: - subnet: { get_param: data_subnet } {% endif %} {% endif %} -{% if not openstack_provider_network_name %} +{% if not openshift_openstack_provider_network_name %} floating-ip: condition: { not: no_floating } type: OS::Neutron::FloatingIP @@ -254,7 +254,7 @@ resources: port_id: { get_resource: port } {% endif %} -{% if not ephemeral_volumes|default(false)|bool %} +{% if not openshift_openstack_ephemeral_volumes|default(false)|bool %} cinder_volume: type: OS::Cinder::Volume properties: -- cgit v1.2.3 From 67791867abbeb06c9bd11a1583ab6b976902fd15 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 18:08:03 +0100 Subject: Fix tox --- roles/openshift_openstack/defaults/main.yml | 1 - roles/openshift_openstack/tasks/populate-dns.yml | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 3eca52963..5f182e0d6 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -51,7 +51,6 @@ openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == openshift_openstack_app_subdomain: "apps" - # heat vars openshift_openstack_clusterid: openshift openshift_openstack_stack_name: "{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index 080c3aca9..c03aceb94 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -1,3 +1,4 @@ +--- - name: "Generate list of private A records" set_fact: private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" @@ -119,8 +120,8 @@ # TODO(shadower): add a cleanup playbook that removes these records, too! 
state: present with_subelements: - - "{{ openshift_openstack_dns_records_add | default({}) }}" - - entries + - "{{ openshift_openstack_dns_records_add | default({}) }}" + - entries register: nsupdate_add_result until: nsupdate_add_result|succeeded retries: 10 -- cgit v1.2.3 From 6241f87f2279e7a57f5adb05ff7ef18b7c943e67 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Thu, 2 Nov 2017 18:21:32 +0100 Subject: Namespace the docker volumes --- playbooks/openstack/sample-inventory/group_vars/all.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml index 450642c81..921edb867 100644 --- a/playbooks/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/openstack/sample-inventory/group_vars/all.yml @@ -58,13 +58,13 @@ openshift_openstack_default_flavor: "m1.medium" # # Docker volume size # # - set specific volume size for roles by uncommenting corresponding lines # # - note: do not remove docker_default_volume_size definition -#docker_master_volume_size: "15" -#docker_infra_volume_size: "15" -#docker_node_volume_size: "15" -#docker_etcd_volume_size: "2" -#docker_dns_volume_size: "1" -#docker_lb_volume_size: "5" -docker_volume_size: "15" +#openshift_openstack_docker_master_volume_size: "15" +#openshift_openstack_docker_infra_volume_size: "15" +#openshift_openstack_docker_node_volume_size: "15" +#openshift_openstack_docker_etcd_volume_size: "2" +#openshift_openstack_docker_dns_volume_size: "1" +#openshift_openstack_docker_lb_volume_size: "5" +openshift_openstack_docker_volume_size: "15" ## Specify server group policies for master and infra nodes. Nova must be configured to ## enable these policies. 'anti-affinity' will ensure that each VM is launched on a -- cgit v1.2.3 From 2e9d134d4564d87dbbc7853b07204f7f44ee01e6 Mon Sep 17 00:00:00 2001 From: Tomas Sedovic Date: Tue, 7 Nov 2017 14:42:43 +1100 Subject: Remove an unused retry file --- roles/hostnames/test/test.retry | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 roles/hostnames/test/test.retry diff --git a/roles/hostnames/test/test.retry b/roles/hostnames/test/test.retry deleted file mode 100644 index 63fc08e4c..000000000 --- a/roles/hostnames/test/test.retry +++ /dev/null @@ -1,3 +0,0 @@ -192.168.124.117 -192.168.124.40 -192.168.124.41 -- cgit v1.2.3
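
Taken together, these commits move the OpenStack provisioning variables under the `openshift_openstack_` prefix. As a minimal sketch (not part of the patches above), the renamed settings referenced in these diffs might be declared in an inventory `group_vars/all.yml` roughly as follows; the `container_storage_setup` key names come from the docker-storage-setup template, but the device, volume-group and size values here are placeholders, and the volume size simply mirrors the sample inventory default:

```
# group_vars/all.yml (illustrative sketch, values are placeholders)

# Default docker volume size; per-role overrides such as
# openshift_openstack_docker_master_volume_size remain optional,
# as in the sample inventory.
openshift_openstack_docker_volume_size: "15"

# Dict consumed by docker-storage-setup-overlayfs.j2; only the key
# names are taken from the template, the values are examples.
openshift_openstack_container_storage_setup:
  docker_dev: "/dev/sdb"
  docker_vg: "docker_vg"
  docker_data_size: "95%VG"
  container_root_lv_name: "dockerlv"
  container_root_lv_mount_path: "/var/lib/docker"
```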