| author | OpenShift Merge Robot <openshift-merge-robot@users.noreply.github.com> | 2017-10-26 10:26:52 -0700 | 
|---|---|---|
| committer | GitHub <noreply@github.com> | 2017-10-26 10:26:52 -0700 | 
| commit | 6d3f9ab2473fb6f69974fd770bc9ae3fb4952b98 (patch) | |
| tree | df8e007ef91cfe951ed241df0e74ed3ff70cb659 | |
| parent | 34f6e3e2543ab961bcded8cbc7e531a7bbf5b02c (diff) | |
| parent | ac62ea0066934877f94e99bda6ec53a9c03ababb (diff) | |
Merge pull request #5793 from tbielawa/openshift_management_module
Automatic merge from submit-queue.
Openshift MGMT Fixes and Container Provider Integration
Various small fixes/polishing to the openshift management role.
* Updated uninstall script
* Automatically add cluster to new management deployment
* Scaffolding for scripts that add any number (*N*) of OCP/Origin clusters as Container Providers to any arbitrary MIQ/CFME instance
* Fix mis-named task in `openshift_nfs/create_export`
* Uninstall playbook only runs on one master
20 files changed, 493 insertions, 106 deletions
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 2fbd23450..f9564499d 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1125,6 +1125,73 @@ of items as ['region=infra', 'zone=primary']
     return selectors
 
+def oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
+    """Parse the Service Account Secrets list, `sa_secrets` (as from
+oc_serviceaccount_secret:state=list) and return the name of the secret
+containing the `secret_hint` string. For example, by default this will
+return the name of the secret holding the SA bearer token.
+
+Only provide the 'results' object to this filter. This filter expects
+to receive a list like this:
+
+    [
+        {
+            "name": "management-admin-dockercfg-p31s2"
+        },
+        {
+            "name": "management-admin-token-bnqsh"
+        }
+    ]
+
+
+Returns:
+
+* `secret_name` [string] - The name of the secret matching the
+  `secret_hint` parameter. By default this is the secret holding the
+  SA's bearer token.
+
+Example playbook usage:
+
+Register a return value from oc_serviceaccount_secret and pass
+that result to this filter plugin.
+
+    - name: Get all SA Secrets
+      oc_serviceaccount_secret:
+        state: list
+        service_account: management-admin
+        namespace: management-infra
+      register: sa
+
+    - name: Save the SA bearer token secret name
+      set_fact:
+        management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+    - name: Get the SA bearer token value
+      oc_secret:
+        state: list
+        name: "{{ management_token }}"
+        namespace: management-infra
+        decode: true
+      register: sa_secret
+
+    - name: Print the bearer token value
+      debug:
+        var: sa_secret.results.decoded.token
+
+    """
+    secret_name = None
+
+    for secret in sa_secrets:
+        # each secret is a hash
+        if secret['name'].find(secret_hint) == -1:
+            continue
+        else:
+            secret_name = secret['name']
+            break
+
+    return secret_name
+
+
 class FilterModule(object):
     """ Custom ansible filter mapping """
@@ -1167,5 +1234,6 @@ class FilterModule(object):
             "to_padded_yaml": to_padded_yaml,
             "oo_random_word": oo_random_word,
             "oo_contains_rule": oo_contains_rule,
-            "oo_selector_to_string_list": oo_selector_to_string_list
+            "oo_selector_to_string_list": oo_selector_to_string_list,
+            "oo_filter_sa_secrets": oo_filter_sa_secrets,
         }
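For readers skimming the diff, here is a minimal standalone sketch of the logic the new `oo_filter_sa_secrets` filter implements, written slightly more idiomatically with `in`; the sample secret names are the ones from the docstring above.

```python
# Standalone sketch of the oo_filter_sa_secrets logic added above.
def oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
    """Return the name of the first secret whose name contains secret_hint."""
    for secret in sa_secrets:
        if secret_hint in secret['name']:
            return secret['name']
    return None

# Sample input, as returned by oc_serviceaccount_secret (state=list):
secrets = [
    {"name": "management-admin-dockercfg-p31s2"},
    {"name": "management-admin-token-bnqsh"},
]

print(oo_filter_sa_secrets(secrets))  # -> management-admin-token-bnqsh
```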
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 7c4a7885d..75ddf8e10 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -974,25 +974,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # openshift_upgrade_post_storage_migration_enabled=true
 # openshift_upgrade_post_storage_migration_fatal=false
 
-# host group for masters
-[masters]
-ose3-master[1:3]-ansible.test.example.com
-
-[etcd]
-ose3-etcd[1:3]-ansible.test.example.com
-
-# NOTE: Containerized load balancer hosts are not yet supported, if using a global
-# containerized=true host variable we must set to false.
-[lb]
-ose3-lb-ansible.test.example.com containerized=false
-
-# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False any node that's also a master.
-[nodes]
-ose3-master[1:3]-ansible.test.example.com
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
-
+######################################################################
 # CloudForms/ManageIQ (CFME/MIQ) Configuration
 # See the readme for full descriptions and getting started
@@ -1042,6 +1024,17 @@ ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'prima
 # setting this variable. Useful for testing specific task files.
 #openshift_management_storage_nfs_local_hostname: false
 
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change your username or password. You should only need to change
+# these values in your inventory if you already changed the actual
+# name and password AND are trying to use integration scripts.
+#
+# For example, adding this cluster as a container provider,
+# playbooks/byo/openshift-management/add_container_provider.yml
+#openshift_management_username: admin
+#openshift_management_password: smartvm
+
 # A hash of parameters you want to override or set in the
 # miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
 # your inventory file as a simple hash. Acceptable values are defined
@@ -1050,3 +1043,22 @@ ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'prima
 #
 # openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
 #openshift_management_template_parameters: {}
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+# NOTE: Containerized load balancer hosts are not yet supported, if using a global
+# containerized=true host variable we must set to false.
+[lb]
+ose3-lb-ansible.test.example.com containerized=false
+
+# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
+# However, in order to ensure that your masters are not burdened with running pods you should
+# make them unschedulable by adding openshift_schedulable=False any node that's also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/playbooks/byo/openshift-management/add_container_provider.yml b/playbooks/byo/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..3378b5abd
--- /dev/null
+++ b/playbooks/byo/openshift-management/add_container_provider.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- include: ../../common/openshift-management/add_container_provider.yml
diff --git a/playbooks/byo/openshift-management/add_many_container_providers.yml b/playbooks/byo/openshift-management/add_many_container_providers.yml
new file mode 100644
index 000000000..62fdb11c5
--- /dev/null
+++ b/playbooks/byo/openshift-management/add_many_container_providers.yml
@@ -0,0 +1,36 @@
+---
+- hosts: localhost
+  tasks:
+  - name: Ensure the container provider configuration is defined
+    assert:
+      that: container_providers_config is defined
+      msg: |
+        Error: Must provide providers config path. Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command
+
+  - name: Include providers/management configuration
+    include_vars:
+      file: "{{ container_providers_config }}"
+
+  - name: Ensure this cluster is a container provider
+    uri:
+      url: "https://{{ management_server['hostname'] }}/api/providers"
+      body_format: json
+      method: POST
+      user: "{{ management_server['user'] }}"
+      password: "{{ management_server['password'] }}"
+      validate_certs: no
+      # Docs on formatting the BODY of the POST request:
+      # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+      body: "{{ item }}"
+    failed_when: false
+    with_items: "{{ container_providers }}"
+    register: results
+
+  # Include openshift_management for access to filter_plugins.
+  - include_role:
+      name: openshift_management
+      tasks_from: noop
+
+  - name: print each result
+    debug:
+      msg: "{{ results.results | oo_filter_container_providers }}"
diff --git a/playbooks/byo/openshift-management/roles b/playbooks/byo/openshift-management/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-management/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
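The `uri` task in the playbook above is a plain REST `POST` to the MIQ/CFME providers API. A rough Python sketch of the same call follows, assuming the third-party `requests` library; the hostname, token, and credentials are illustrative placeholders, and the payload shape mirrors the `files/examples/container_providers.yml` example added later in this diff.

```python
# Sketch of the POST that the add_many_container_providers.yml uri task
# performs for each entry under container_providers. Placeholder values only.
import requests

provider = {
    "connection_configurations": [{
        "authentication": {"auth_key": "<bearer-token>", "authtype": "bearer", "type": "AuthToken"},
        "endpoint": {"role": "default", "security_protocol": "ssl-without-validation", "verify_ssl": 0},
    }],
    "hostname": "ocp-prod.example.com",
    "name": "OCP Production",
    "port": 8443,
    "type": "ManageIQ::Providers::Openshift::ContainerManager",
}

resp = requests.post(
    "https://mgmt.example.com/api/providers",  # management_server hostname
    json=provider,                             # body_format: json
    auth=("admin", "smartvm"),                 # management_server user/password
    verify=False,                              # validate_certs: no
)
print(resp.json())
```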
diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml
index a1fb1cdc4..e95c1c88a 100644
--- a/playbooks/byo/openshift-management/uninstall.yml
+++ b/playbooks/byo/openshift-management/uninstall.yml
@@ -1,4 +1,2 @@
 ---
-# - include: ../openshift-cluster/initialize_groups.yml
-
 - include: ../../common/openshift-management/uninstall.yml
diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..facb3a5b9
--- /dev/null
+++ b/playbooks/common/openshift-management/add_container_provider.yml
@@ -0,0 +1,8 @@
+---
+- name: Add Container Provider to Management
+  hosts: oo_first_master
+  tasks:
+  - name: Run the Management Integration Tasks
+    include_role:
+      name: openshift_management
+      tasks_from: add_container_provider
diff --git a/playbooks/common/openshift-management/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml
index 698d93405..9f35cc276 100644
--- a/playbooks/common/openshift-management/uninstall.yml
+++ b/playbooks/common/openshift-management/uninstall.yml
@@ -1,6 +1,6 @@
 ---
 - name: Uninstall CFME
-  hosts: masters
+  hosts: masters[0]
   tasks:
   - name: Run the CFME Uninstall Role Tasks
     include_role:
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
index 3a71d9211..05ca27913 100644
--- a/roles/openshift_management/README.md
+++ b/roles/openshift_management/README.md
@@ -38,6 +38,10 @@ deployment type (`openshift_deployment_type`):
          * [Cloud Provider](#cloud-provider)
          * [Preconfigured (Expert Configuration Only)](#preconfigured-expert-configuration-only)
    * [Customization](#customization)
+   * [Container Provider](#container-provider)
+      * [Manually](#manually)
+      * [Automatically](#automatically)
+      * [Multiple Providers](#multiple-providers)
    * [Uninstall](#uninstall)
    * [Additional Information](#additional-information)
@@ -80,30 +84,10 @@ to there being no databases that require pods.
 
 *Be extra careful* if you are overriding template
 parameters. Including parameters not defined in a template **will
-cause errors**.
-
-**Container Provider Integration** - If you want add your container
-platform (OCP/Origin) as a *Container Provider* in CFME/MIQ then you
-must ensure that the infrastructure management hooks are installed.
-
-* During your OCP/Origin install, ensure that you have the
-  `openshift_use_manageiq` parameter set to `true` in your inventory
-  at install time. This will create a `management-infra` project and a
-  service account user.
-* After CFME/MIQ is installed, obtain the `management-admin` service
-  account token and copy it somewhere safe.
-
-```bash
-$ oc serviceaccounts get-token -n management-infra management-admin
-eyJhuGdiOiJSUzI1NiIsInR5dCI6IkpXVCJ9.eyJpd9MiOiJrbWJldm5lbGVzL9NldnZpY2VhY2NvbW50Iiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9uYW1ld9BhY2UiOiJtYW5hZ2VtZW50LWluZnJhIiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9zZWNyZXQuumFtZSI6Im1humFnZW1lunQtYWRtyW4tbG9rZW4tdDBnOTAiLCJrbWJldm5lbGVzLmlvL9NldnZpY2VhY2NvbW50L9NldnZpY2UtYWNju9VubC5uYW1lIjoiuWFuYWbluWVubC1hZG1puiIsImt1YmVyumV0ZXMuyW8vd2VybmljZWFjY291unQvd2VybmljZS1hY2NvbW50LnVpZCI6IjRiZDM2MWQ1LWE1NDAtMTFlNy04YzI5LTUyNTQwMDliMmNkZCIsInN1YiI6InN5d9RluTpzZXJ2yWNlYWNju9VubDptYW5hZ2VtZW50LWluZnJhOm1humFnZW1lunQtYWRtyW4ifQ.B6sZLGD9O4vBu9MHwiG-C_4iEwjBXb7Af8BPw-LNlujDmHhOnQ-Oo4QxQKyj9edynfmDy2yutUyJ2Mm9HfDGWg4C9xhWImHoq6Nl7T5_9djkeGKkK7Ejvg4fA-IkrzEsZeQuluBvXnE6wvP0LCjUo_dx4pPyZJyp46teV9NqKQeDzeysjlMCyqp6AK6-Lj8ILG8YA6d_97HlzL_EgFBLAu0lBSn-uC_9J0gLysqBtK6TI0nExfhv9Bm1_5bdHEbKHPW7xIlYlI9AgmyTyhsQ6SoQWtL2khBjkG9TlPBq9wYJj9bzqgVZlqEfICZxgtXO7sYyuoje4y8lo0YQ0kZmig
-```
-
-* In the CFME/MIQ web interface, navigate to `Compute` →
-  `Containers` → `Providers` and select `⚙ Configuration` → `⊕
-  Add a new Containers Provider`
-
-*See the [upstream documentation](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/index.html#containers-providers) for additional information.*
-
+cause errors**. If you do receive an error during the `Ensure the CFME
+App is created` task, we recommend running the
+[uninstall scripts](#uninstall) first before running the installer
+again.
 
 # Requirements
@@ -140,11 +124,13 @@ used in your Ansible inventory to control the behavior of this
 installer.
 
-| Variable                                       | Required | Default                        | Description                         |
-|------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
-| `openshift_management_project`                       | **No**   | `openshift-management`               | Namespace for the installation.     |
+| Variable                                             | Required | Default                        | Description                         |
+|------------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
+| `openshift_management_project`                       | **No**   | `openshift-management`         | Namespace for the installation.     |
 | `openshift_management_project_description`           | **No**   | *CloudForms Management Engine* | Namespace/project description.      |
-| `openshift_management_install_management`                  | **No**   | `false`                        | Boolean, set to `true` to install the application |
+| `openshift_management_install_management`            | **No**   | `false`                        | Boolean, set to `true` to install the application |
+| `openshift_management_username`                      | **No**   | `admin`                        | Default management username. Changing this value **does not change the username**. Only change this value if you have changed the name already and are running integration scripts (such as the [add container provider](#container-provider) script) |
+| `openshift_management_password`                      | **No**   | `smartvm`                      | Default management password. Changing this value **does not change the password**. Only change this value if you have changed the password already and are running integration scripts (such as the [add container provider](#container-provider) script) |
 | **PRODUCT CHOICE**  | | | | |
 | `openshift_management_app_template`                  | **No**   | `miq-template`                 | The project flavor to install. Choices: <ul><li>`miq-template`: ManageIQ using a podified database</li> <li> `miq-template-ext-db`: ManageIQ using an external database</li> <li>`cfme-template`: CloudForms using a podified database<sup>[1]</sup></li> <li> `cfme-template-ext-db`: CloudForms using an external database.<sup>[1]</sup></li></ul> |
 | **STORAGE CLASSES** | | | | |
@@ -268,6 +254,9 @@ openshift_management_app_template=cfme-template-ext-db
 openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'}
 ```
 
+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
 # Limitations
 
 This release is the first OpenShift CFME release in the OCP 3.7
@@ -318,6 +307,9 @@ inventory. The following keys are required:
 * `DATABASE_PORT` - *note: Most PostgreSQL servers run on port `5432`*
 * `DATABASE_NAME`
 
+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
 Your inventory would contain a line similar to this:
 
 ```ini
@@ -453,6 +445,116 @@ hash. This applies to **CloudForms** installations as well:
 [cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml),
 [cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml).
 
+# Container Provider
+
+There are two methods for enabling container provider integration. You
+can manually add OCP/Origin as a container provider, or you can try
+the playbooks included with this role.
+
+## Manually
+
+See the online documentation for steps to manually add your cluster as
+a container provider:
+
+* [Container Providers](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/#containers-providers)
+
+## Automatically
+
+Automated container provider integration can be accomplished using the
+playbooks included with this role.
+
+This playbook will:
+
+1. Gather the necessary authentication secrets
+1. Find the public routes to the Management app and the cluster API
+1. Make a REST call to add this cluster as a container provider
+
+
+```
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml
+```
+
+## Multiple Providers
+
+As well as providing playbooks to integrate your *current* container
+platform into the management service, this role includes a **tech
+preview** script which allows you to add multiple container platforms
+as container providers in any arbitrary MIQ/CFME server.
+
+Using the multiple-provider script requires manual configuration and
+setting an `EXTRA_VARS` parameter on the command-line.
+
+
+1. Copy the
+   [container_providers.yml](files/examples/container_providers.yml)
+   example somewhere, such as `/tmp/cp.yml`
+1. If you changed your CFME/MIQ name or password, update the
+   `hostname`, `user`, and `password` parameters in the
+   `management_server` key in the `container_providers.yml` file copy
+1. Fill in an entry under the `container_providers` key for *each* OCP
+   or Origin cluster you want to add as container providers
+
+**Parameters Which MUST Be Configured:**
+
+* `auth_key` - This is the token of a service account which has admin capabilities on the cluster.
+* `hostname` - This is the hostname that points to the cluster API. Each container provider must have a unique hostname.
+* `name` - This is the name of the cluster as displayed in the management server container providers overview. This must be unique.
+
+*Note*: You can obtain the `auth_key` bearer token from your clusters
+ with this command: `oc serviceaccounts get-token -n management-infra
+ management-admin`
+
+**Parameters Which MAY Be Configured:**
+
+* `port` - Update this key if your OCP/Origin cluster runs the API on a port other than `8443`
+* `endpoint` - You may enable SSL verification (`verify_ssl`) or change the validation setting to `ssl-with-validation`. Support for custom trusted CA certificates is not available at this time.
+
+
+Let's see an example describing the following scenario:
+
+* You copied `files/examples/container_providers.yml` to `/tmp/cp.yml`
+* You're adding two OCP clusters
+* Your management server runs on `mgmt.example.com`
+
+You would customize `/tmp/cp.yml` as such:
+
+```yaml
+---
+container_providers:
+  - connection_configurations:
+      - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken}
+        endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+    hostname: "ocp-prod.example.com"
+    name: OCP Production
+    port: 8443
+    type: "ManageIQ::Providers::Openshift::ContainerManager"
+  - connection_configurations:
+      - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken}
+        endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+    hostname: "ocp-test.example.com"
+    name: OCP Testing
+    port: 8443
+    type: "ManageIQ::Providers::Openshift::ContainerManager"
+management_server:
+  hostname: "mgmt.example.com"
+  user: admin
+  password: b3tt3r_p4SSw0rd
+```
+
+Then you will run the many-container-providers integration script. You
+**must** provide the path to the container providers configuration
+file as an `EXTRA_VARS` parameter to `ansible-playbook`. Use the `-e`
+(or `--extra-vars`) parameter to set `container_providers_config` to
+the config file path.
+
+```
+$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \
+      playbooks/byo/openshift-management/add_many_container_providers.yml
+```
+
+Afterwards you will find two new container providers in your
+management service. Navigate to `Compute` → `Containers` → `Providers`
+to see an overview.
 
 # Uninstall
 
@@ -461,6 +563,11 @@ installation:
 
 * `playbooks/byo/openshift-management/uninstall.yml`
 
+NFS export definitions and data stored on NFS exports are not
+automatically removed. You are urged to manually erase any data from
+old application or database deployments before attempting to
+initialize a new deployment.
+
 # Additional Information
 
 The upstream project,
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
index ebb56313f..8ba65b386 100644
--- a/roles/openshift_management/defaults/main.yml
+++ b/roles/openshift_management/defaults/main.yml
@@ -77,6 +77,20 @@ openshift_management_storage_nfs_base_dir: /exports
 openshift_management_storage_nfs_local_hostname: false
 
 ######################################################################
+# DEFAULT ACCOUNT INFORMATION
+######################################################################
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change your username or password. You should only need to change
+# these values in your inventory if you already changed the actual
+# name and password AND are trying to use integration scripts.
+#
+# For example, adding this cluster as a container provider,
+# playbooks/byo/openshift-management/add_container_provider.yml
+openshift_management_username: admin
+openshift_management_password: smartvm
+
+######################################################################
 # SCAFFOLDING - These are parameters we pre-seed that a user may or
 # may not set later
 ######################################################################
diff --git a/roles/openshift_management/files/examples/container_providers.yml b/roles/openshift_management/files/examples/container_providers.yml
new file mode 100644
index 000000000..661f62e4d
--- /dev/null
+++ b/roles/openshift_management/files/examples/container_providers.yml
@@ -0,0 +1,22 @@
+---
+container_providers:
+  - connection_configurations:
+      - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+        endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+    hostname: "OCP/Origin cluster hostname (providing API access)"
+    name: openshift-management
+    port: 8443
+    type: "ManageIQ::Providers::Openshift::ContainerManager"
+# Copy and update for as many OCP or Origin providers as you want to
+# add to your management service
+  # - connection_configurations:
+  #     - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+  #       endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+  #   hostname: "OCP/Origin cluster hostname (providing API access)"
+  #   name: openshift-management
+  #   port: 8443
+  #   type: "ManageIQ::Providers::Openshift::ContainerManager"
+management_server:
+  hostname: "Management server hostname (providing API access)"
+  user: admin
+  password: smartvm
diff --git a/roles/openshift_management/filter_plugins/oo_management_filters.py b/roles/openshift_management/filter_plugins/oo_management_filters.py
new file mode 100644
index 000000000..3b7013d9a
--- /dev/null
+++ b/roles/openshift_management/filter_plugins/oo_management_filters.py
@@ -0,0 +1,32 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""
+Filter methods for the management role
+"""
+
+
+def oo_filter_container_providers(results):
+    """results - the result from posting the API calls for adding new
+providers"""
+    all_results = []
+    for result in results:
+        if 'results' in result['json']:
+            # We got an OK response
+            res = result['json']['results'][0]
+            all_results.append("Provider '{}' - Added successfully".format(res['name']))
+        elif 'error' in result['json']:
+            # This was a problem
+            all_results.append("Provider '{}' - Failed to add. Message: {}".format(
+                result['item']['name'], result['json']['error']['message']))
+    return all_results
+
+
+class FilterModule(object):
+    """ Custom ansible filter mapping """
+
+    # pylint: disable=no-self-use, too-few-public-methods
+    def filters(self):
+        """ returns a mapping of filters to methods """
+        return {
+            "oo_filter_container_providers": oo_filter_container_providers,
+        }
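To see what this filter produces, here is a small sketch with mocked `uri` results; the function is copied from the new filter plugin above so the snippet runs standalone, and the response bodies are illustrative of the documented ManageIQ API shapes, not captured output.

```python
# Copied from the oo_management_filters.py plugin above so this runs alone.
def oo_filter_container_providers(results):
    all_results = []
    for result in results:
        if 'results' in result['json']:
            res = result['json']['results'][0]
            all_results.append("Provider '{}' - Added successfully".format(res['name']))
        elif 'error' in result['json']:
            all_results.append("Provider '{}' - Failed to add. Message: {}".format(
                result['item']['name'], result['json']['error']['message']))
    return all_results

# Mocked registered 'uri' results: one success, one failure (illustrative).
sample_results = [
    {"item": {"name": "OCP Production"},
     "json": {"results": [{"name": "OCP Production"}]}},
    {"item": {"name": "OCP Testing"},
     "json": {"error": {"message": "Record not unique"}}},
]

for line in oo_filter_container_providers(sample_results):
    print(line)
# Provider 'OCP Production' - Added successfully
# Provider 'OCP Testing' - Failed to add. Message: Record not unique
```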
diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml
new file mode 100644
index 000000000..383e6edb5
--- /dev/null
+++ b/roles/openshift_management/tasks/add_container_provider.yml
@@ -0,0 +1,65 @@
+---
+- name: Ensure lib_openshift modules are available
+  include_role:
+    name: lib_openshift
+
+- name: Ensure OpenShift facts module is available
+  include_role:
+    name: openshift_facts
+
+- name: Ensure OpenShift facts are loaded
+  openshift_facts:
+
+- name: Ensure the management SA Secrets are read
+  oc_serviceaccount_secret:
+    state: list
+    service_account: management-admin
+    namespace: management-infra
+  register: sa
+
+- name: Ensure the management SA bearer token is identified
+  set_fact:
+    management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+- name: Ensure the SA bearer token value is read
+  oc_secret:
+    state: list
+    name: "{{ management_token }}"
+    namespace: management-infra
+    decode: true
+  no_log: True
+  register: sa_secret
+
+- name: Ensure the SA bearer token value is saved
+  set_fact:
+    management_bearer_token: "{{ sa_secret.results.decoded.token }}"
+
+- name: Ensure we have the public route to the management service
+  oc_route:
+    state: list
+    name: httpd
+    namespace: openshift-management
+  register: route
+
+- name: Ensure the management service route is saved
+  set_fact:
+    management_route: "{{ route.results.0.spec.host }}"
+
+- name: Ensure this cluster is a container provider
+  uri:
+    url: "https://{{ management_route }}/api/providers"
+    body_format: json
+    method: POST
+    user: "{{ openshift_management_username }}"
+    password: "{{ openshift_management_password }}"
+    validate_certs: no
+    # Docs on formatting the BODY of the POST request:
+    # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+    body:
+      connection_configurations:
+        - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken}
+          endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+      hostname: "{{ openshift.master.cluster_public_hostname }}"
+      name: "{{ openshift_management_project }}"
+      port: "{{ openshift.master.api_port }}"
+      type: "ManageIQ::Providers::Openshift::ContainerManager"
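These tasks use the `lib_openshift` modules, but the flow (fetch the service account token, find the management route, then POST) is easy to follow with plain `oc` commands. A hedged sketch, assuming a logged-in `oc` client and the role's default namespaces:

```python
# Sketch: fetch the management-admin bearer token and the httpd route host
# with plain oc calls, mirroring the tasks above. Assumes 'oc' is logged in
# with sufficient privileges; Python 3.7+ for text=True.
import json
import subprocess

def oc(*args):
    """Run an oc command and return its stripped stdout."""
    return subprocess.check_output(("oc",) + args, text=True).strip()

# Equivalent of the oc_serviceaccount_secret + oo_filter_sa_secrets +
# oc_secret chain: the oc client resolves the token in a single call.
bearer_token = oc("serviceaccounts", "get-token",
                  "-n", "management-infra", "management-admin")

# Equivalent of the oc_route lookup for the management service route.
route = json.loads(oc("get", "route", "httpd",
                      "-n", "openshift-management", "-o", "json"))
management_route = route["spec"]["host"]

print(management_route)  # host used in the https://.../api/providers POST
```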
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
index 86c4d0010..9be923a57 100644
--- a/roles/openshift_management/tasks/main.yml
+++ b/roles/openshift_management/tasks/main.yml
@@ -2,23 +2,33 @@
 ######################################################################
 # Users, projects, and privileges
 
-- name: Run pre-install CFME validation checks
+- name: Run pre-install Management validation checks
   include: validate.yml
 
-- name: "Ensure the CFME '{{ openshift_management_project }}' namespace exists"
+# This creates a service account allowing Container Provider
+# integration (managing OCP/Origin via MIQ/Management)
+- name: Enable Container Provider Integration
+  include_role:
+    name: openshift_manageiq
+
+- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists"
   oc_project:
     state: present
     name: "{{ openshift_management_project }}"
     display_name: "{{ openshift_management_project_description }}"
 
-- name: Create and Authorize CFME Accounts
+- name: Create and Authorize Management Accounts
   include: accounts.yml
 
 ######################################################################
 # STORAGE - Initialize basic storage class
+- name: Determine the correct NFS host if required
+  include: storage/nfs_server.yml
+  when: openshift_management_storage_class in ['nfs', 'nfs_external']
+
 #---------------------------------------------------------------------
 # * nfs - set up NFS shares on the first master for a proof of concept
-- name: Create required NFS exports for CFME app storage
+- name: Create required NFS exports for Management app storage
   include: storage/nfs.yml
   when: openshift_management_storage_class == 'nfs'
 
@@ -45,7 +55,7 @@
 ######################################################################
 # APPLICATION TEMPLATE
 
-- name: Install the CFME app and PV templates
+- name: Install the Management app and PV templates
   include: template.yml
 
######################################################################
@@ -71,9 +81,16 @@
   when:
     - openshift_management_app_template in ['miq-template', 'cfme-template']
 
-- name: Ensure the CFME App is created
+- name: Ensure the Management App is created
   oc_process:
     namespace: "{{ openshift_management_project }}"
     template_name: "{{ openshift_management_template_name }}"
     create: True
     params: "{{ openshift_management_template_parameters }}"
+
+- name: Wait for the app to come up. May take several minutes, 30s check intervals, 10m max
+  command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}"
+  register: app_seeding_logs
+  until: app_seeding_logs.stdout.find('Server starting complete') != -1
+  delay: 30
+  retries: 20
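The wait task is a simple poll-until loop over the pod logs. A sketch of the equivalent loop outside Ansible, assuming the default `miq` flavor (pod `miq-0`) and the `openshift-management` project:

```python
# Sketch: poll the app pod logs until seeding finishes, mirroring the
# until/delay/retries settings above (20 retries x 30s = 10 minutes max).
import subprocess
import time

def app_seeded(pod="miq-0", namespace="openshift-management"):
    """Return True once the server logs report that seeding completed."""
    logs = subprocess.run(
        ["oc", "logs", pod, "-n", namespace],
        capture_output=True, text=True).stdout
    return "Server starting complete" in logs

for attempt in range(20):          # retries: 20
    if app_seeded():
        break
    time.sleep(30)                 # delay: 30
else:
    raise RuntimeError("App did not finish seeding within 10 minutes")
```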
diff --git a/roles/openshift_management/tasks/noop.yml b/roles/openshift_management/tasks/noop.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_management/tasks/noop.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
index 31c845725..d1b9a8d5c 100644
--- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
+++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
@@ -26,7 +26,7 @@
       when:
         - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined
 
-- name: Check if the CFME App PV has been created
+- name: Check if the Management App PV has been created
   oc_obj:
     namespace: "{{ openshift_management_project }}"
     state: list
@@ -34,7 +34,7 @@
     name: "{{ openshift_management_flavor_short }}-app"
   register: miq_app_pv_check
 
-- name: Check if the CFME DB PV has been created
+- name: Check if the Management DB PV has been created
   oc_obj:
     namespace: "{{ openshift_management_project }}"
     state: list
@@ -44,7 +44,7 @@
   when:
     - openshift_management_app_template in ['miq-template', 'cfme-template']
 
-- name: Ensure the CFME App PV is created
+- name: Ensure the Management App PV is created
   oc_process:
     namespace: "{{ openshift_management_project }}"
     template_name: "{{ openshift_management_flavor }}-app-pv"
@@ -55,7 +55,7 @@
       NFS_HOST: "{{ openshift_management_nfs_server }}"
   when: miq_app_pv_check.results.results == [{}]
 
-- name: Ensure the CFME DB PV is created
+- name: Ensure the Management DB PV is created
   oc_process:
     namespace: "{{ openshift_management_project }}"
     template_name: "{{ openshift_management_flavor }}-db-pv"
diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml
index 696808328..94e11137c 100644
--- a/roles/openshift_management/tasks/storage/nfs.yml
+++ b/roles/openshift_management/tasks/storage/nfs.yml
@@ -2,37 +2,6 @@
 # Tasks to statically provision NFS volumes
 # Include if not using dynamic volume provisioning
 
-- name: Ensure we save the local NFS server if one is provided
-  set_fact:
-    openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}"
-  when:
-    - openshift_management_storage_nfs_local_hostname is defined
-    - openshift_management_storage_nfs_local_hostname != False
-    - openshift_management_storage_class == "nfs"
-
-- name: Ensure we save the local NFS server
-  set_fact:
-    openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}"
-  when:
-    - openshift_management_nfs_server is not defined
-    - openshift_management_storage_class == "nfs"
-
-- name: Ensure we save the external NFS server
-  set_fact:
-    openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}"
-  when:
-    - openshift_management_storage_class == "nfs_external"
-
-- name: Failed NFS server detection
-  assert:
-    that:
-      - openshift_management_nfs_server is defined
-    msg: |
-      "Unable to detect an NFS server. The 'nfs_external'
-      openshift_management_storage_class option requires that you set
-      openshift_management_storage_nfs_external_hostname. NFS hosts detected
-      for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}"
-
 - name: Setting up NFS storage
   block:
     - name: Include the NFS Setup role tasks
diff --git a/roles/openshift_management/tasks/storage/nfs_server.yml b/roles/openshift_management/tasks/storage/nfs_server.yml
new file mode 100644
index 000000000..96a742c83
--- /dev/null
+++ b/roles/openshift_management/tasks/storage/nfs_server.yml
@@ -0,0 +1,31 @@
+---
+- name: Ensure we save the local NFS server if one is provided
+  set_fact:
+    openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}"
+  when:
+    - openshift_management_storage_nfs_local_hostname is defined
+    - openshift_management_storage_nfs_local_hostname != False
+    - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the local NFS server
+  set_fact:
+    openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}"
+  when:
+    - openshift_management_nfs_server is not defined
+    - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the external NFS server
+  set_fact:
+    openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}"
+  when:
+    - openshift_management_storage_class == "nfs_external"
+
+- name: Failed NFS server detection
+  assert:
+    that:
+      - openshift_management_nfs_server is defined
+    msg: |
+      "Unable to detect an NFS server. The 'nfs_external'
+      openshift_management_storage_class option requires that you set
+      openshift_management_storage_nfs_external_hostname. NFS hosts detected
+      for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}"
diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml
index 299158ac4..9f97cdcb9 100644
--- a/roles/openshift_management/tasks/template.yml
+++ b/roles/openshift_management/tasks/template.yml
@@ -15,7 +15,7 @@
 # STANDARD PODIFIED DATABASE TEMPLATE
 - when: openshift_management_app_template in ['miq-template', 'cfme-template']
   block:
-  - name: Check if the CFME Server template has been created already
+  - name: Check if the Management Server template has been created already
     oc_obj:
       namespace: "{{ openshift_management_project }}"
       state: list
@@ -25,12 +25,12 @@
   - when: miq_server_check.results.results == [{}]
     block:
-    - name: Copy over CFME Server template
+    - name: Copy over Management Server template
       copy:
         src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml"
         dest: "{{ template_dir }}/"
 
-    - name: Ensure CFME Server Template is created
+    - name: Ensure Management Server Template is created
       oc_obj:
         namespace: "{{ openshift_management_project }}"
         name: "{{ openshift_management_flavor }}"
@@ -41,9 +41,9 @@
 ######################################################################
 # EXTERNAL DATABASE TEMPLATE
-- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template']
+- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
   block:
-  - name: Check if the CFME Ext-DB Server template has been created already
+  - name: Check if the Management Ext-DB Server template has been created already
     oc_obj:
       namespace: "{{ openshift_management_project }}"
       state: list
@@ -53,12 +53,12 @@
   - when: miq_ext_db_server_check.results.results == [{}]
     block:
-    - name: Copy over CFME Ext-DB Server template
+    - name: Copy over Management Ext-DB Server template
       copy:
         src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml"
         dest: "{{ template_dir }}/"
 
-    - name: Ensure CFME Ext-DB Server Template is created
+    - name: Ensure Management Ext-DB Server Template is created
       oc_obj:
         namespace: "{{ openshift_management_project }}"
         name: "{{ openshift_management_flavor }}-ext-db"
@@ -74,7 +74,7 @@
 # Begin conditional PV template creations
 
 # Required for the application server
-- name: Check if the CFME App PV template has been created already
+- name: Check if the Management App PV template has been created already
   oc_obj:
     namespace: "{{ openshift_management_project }}"
     state: list
@@ -84,12 +84,12 @@
 - when: miq_app_pv_check.results.results == [{}]
   block:
-  - name: Copy over CFME App PV template
+  - name: Copy over Management App PV template
     copy:
       src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"
      dest: "{{ template_dir }}/"
 
-  - name: Ensure CFME App PV Template is created
+  - name: Ensure Management App PV Template is created
    oc_obj:
      namespace: "{{ openshift_management_project }}"
      name: "{{ openshift_management_flavor }}-app-pv"
@@ -103,7 +103,7 @@
 # Required for database if the installation is fully podified
 - when: openshift_management_app_template in ['miq-template', 'cfme-template']
   block:
-  - name: Check if the CFME DB PV template has been created already
+  - name: Check if the Management DB PV template has been created already
     oc_obj:
       namespace: "{{ openshift_management_project }}"
       state: list
@@ -113,12 +113,12 @@
   - when: miq_db_pv_check.results.results == [{}]
     block:
-    - name: Copy over CFME DB PV template
+    - name: Copy over Management DB PV template
       copy:
         src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"
         dest: "{{ template_dir }}/"
 
-    - name: Ensure CFME DB PV Template is created
+    - name: Ensure Management DB PV Template is created
       oc_obj:
         namespace: "{{ openshift_management_project }}"
         name: "{{ openshift_management_flavor }}-db-pv"
diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml
index 39323904f..b0b888d56 100644
--- a/roles/openshift_nfs/tasks/create_export.yml
+++ b/roles/openshift_nfs/tasks/create_export.yml
@@ -12,7 +12,7 @@
 #   l_nfs_export_name: Name of sub-directory of the export
 #   l_nfs_options: Mount Options
 
-- name: Ensure CFME App NFS export directory exists
+- name: "Ensure {{ l_nfs_export_name }} NFS export directory exists"
   file:
     path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}"
     state: directory
