 .tito/packages/openshift-ansible | 2
 filter_plugins/oo_filters.py | 70
 inventory/byo/hosts.example | 50
 openshift-ansible.spec | 32
 playbooks/aws/README.md | 2
 playbooks/aws/provisioning_vars.yml.example | 4
 playbooks/byo/openshift-cluster/config.yml | 2
 playbooks/byo/openshift-management/add_container_provider.yml | 6
 playbooks/byo/openshift-management/add_many_container_providers.yml | 36
 playbooks/byo/openshift-management/roles (symlink) | 1
 playbooks/byo/openshift-management/uninstall.yml | 2
 playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 2
 playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
 playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 4
 playbooks/common/openshift-management/add_container_provider.yml | 8
 playbooks/common/openshift-management/uninstall.yml | 2
 playbooks/common/openshift-node/clean_image.yml | 10
 playbooks/common/openshift-node/image_prep.yml | 3
 roles/etcd/defaults/main.yaml | 4
 roles/etcd/tasks/backup.force_new_cluster.yml | 4
 roles/etcd/tasks/backup/backup.yml | 16
 roles/etcd/tasks/backup/copy.yml | 2
 roles/etcd/tasks/backup/unarchive.yml | 2
 roles/etcd/tasks/backup/vars.yml | 5
 roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml | 21
 roles/etcd/tasks/system_container.yml | 54
 roles/kuryr/defaults/main.yaml | 6
 roles/openshift_aws/tasks/seal_ami.yml | 7
 roles/openshift_etcd_facts/vars/main.yml | 1
 roles/openshift_logging/tasks/install_logging.yaml | 33
 roles/openshift_management/README.md | 163
 roles/openshift_management/defaults/main.yml | 14
 roles/openshift_management/files/examples/container_providers.yml | 22
 roles/openshift_management/filter_plugins/oo_management_filters.py | 32
 roles/openshift_management/tasks/add_container_provider.yml | 65
 roles/openshift_management/tasks/main.yml | 29
 roles/openshift_management/tasks/noop.yml | 1
 roles/openshift_management/tasks/storage/create_nfs_pvs.yml | 8
 roles/openshift_management/tasks/storage/nfs.yml | 31
 roles/openshift_management/tasks/storage/nfs_server.yml | 31
 roles/openshift_management/tasks/template.yml | 26
 roles/openshift_master/tasks/journald.yml | 5
 roles/openshift_nfs/tasks/create_export.yml | 2
 roles/openshift_sanitize_inventory/tasks/main.yml | 2
 44 files changed, 611 insertions, 213 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index b340654f4..7a7838ed3 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.178.0 ./
+3.7.0-0.183.0 ./
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 2fbd23450..f9564499d 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1125,6 +1125,73 @@ of items as ['region=infra', 'zone=primary']
return selectors
+def oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
+ """Parse the Service Account Secrets list, `sa_secrets`, (as from
+oc_serviceaccount_secret:state=list) and return the name of the secret
+containing the `secret_hint` string. For example, by default this will
+return the name of the secret holding the SA bearer token.
+
+Only provide the 'results' object to this filter. This filter expects
+to receive a list like this:
+
+ [
+ {
+ "name": "management-admin-dockercfg-p31s2"
+ },
+ {
+ "name": "management-admin-token-bnqsh"
+ }
+ ]
+
+
+Returns:
+
+* `secret_name` [string] - The name of the secret matching the
+ `secret_hint` parameter. By default this is the secret holding the
+ SA's bearer token.
+
+Example playbook usage:
+
+Register a return value from oc_serviceaccount_secret and pass
+that result to this filter plugin.
+
+ - name: Get all SA Secrets
+ oc_serviceaccount_secret:
+ state: list
+ service_account: management-admin
+ namespace: management-infra
+ register: sa
+
+ - name: Save the SA bearer token secret name
+ set_fact:
+ management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+ - name: Get the SA bearer token value
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: management-infra
+ decode: true
+ register: sa_secret
+
+ - name: Print the bearer token value
+ debug:
+ var: sa_secret.results.decoded.token
+
+ """
+ secret_name = None
+
+ for secret in sa_secrets:
+ # each secret is a hash
+ if secret['name'].find(secret_hint) == -1:
+ continue
+ else:
+ secret_name = secret['name']
+ break
+
+ return secret_name
+
+
class FilterModule(object):
""" Custom ansible filter mapping """
@@ -1167,5 +1234,6 @@ class FilterModule(object):
"to_padded_yaml": to_padded_yaml,
"oo_random_word": oo_random_word,
"oo_contains_rule": oo_contains_rule,
- "oo_selector_to_string_list": oo_selector_to_string_list
+ "oo_selector_to_string_list": oo_selector_to_string_list,
+ "oo_filter_sa_secrets": oo_filter_sa_secrets,
}
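For reference, a minimal, hypothetical sketch (not part of the patch) of how the new `oo_filter_sa_secrets` filter behaves when called directly. It assumes the module above is importable as `filter_plugins.oo_filters` and reuses the sample 'results' list from the docstring:

```python
# Hypothetical direct invocation of the new filter (illustration only).
from filter_plugins.oo_filters import oo_filter_sa_secrets

sa_results = [
    {"name": "management-admin-dockercfg-p31s2"},
    {"name": "management-admin-token-bnqsh"},
]

# Default hint '-token-' returns the bearer-token secret name.
print(oo_filter_sa_secrets(sa_results))
# -> management-admin-token-bnqsh

# A different hint selects a different secret.
print(oo_filter_sa_secrets(sa_results, secret_hint="-dockercfg-"))
# -> management-admin-dockercfg-p31s2

# No matching secret (or an empty list) returns None.
print(oo_filter_sa_secrets([]))
# -> None
```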
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 7c4a7885d..75ddf8e10 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -974,25 +974,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# openshift_upgrade_post_storage_migration_enabled=true
# openshift_upgrade_post_storage_migration_fatal=false
-# host group for masters
-[masters]
-ose3-master[1:3]-ansible.test.example.com
-
-[etcd]
-ose3-etcd[1:3]-ansible.test.example.com
-
-# NOTE: Containerized load balancer hosts are not yet supported, if using a global
-# containerized=true host variable we must set to false.
-[lb]
-ose3-lb-ansible.test.example.com containerized=false
-
-# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False any node that's also a master.
-[nodes]
-ose3-master[1:3]-ansible.test.example.com
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
-
+######################################################################
# CloudForms/ManageIQ (CFME/MIQ) Configuration
# See the readme for full descriptions and getting started
@@ -1042,6 +1024,17 @@ ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'prima
# setting this variable. Useful for testing specific task files.
#openshift_management_storage_nfs_local_hostname: false
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change your username or password. You should only need to change
+# these values in your inventory if you already changed the actual
+# name and password AND are trying to use integration scripts.
+#
+# For example, adding this cluster as a container provider,
+# playbooks/byo/openshift-management/add_container_provider.yml
+#openshift_management_username: admin
+#openshift_management_password: smartvm
+
# A hash of parameters you want to override or set in the
# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
# your inventory file as a simple hash. Acceptable values are defined
@@ -1050,3 +1043,22 @@ ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'prima
#
# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
#openshift_management_template_parameters: {}
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+# NOTE: Containerized load balancer hosts are not yet supported, if using a global
+# containerized=true host variable we must set to false.
+[lb]
+ose3-lb-ansible.test.example.com containerized=false
+
+# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
+# However, in order to ensure that your masters are not burdened with running pods you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 5ca9ac3a9..44521ed53 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.178.0%{?dist}
+Release: 0.183.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -280,6 +280,36 @@ Atomic OpenShift Utilities includes
%changelog
+* Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.183.0
+-
+
+* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.182.0
+- Fixing documentation for the cert_key_path variable name.
+ (kwoodson@redhat.com)
+- Moving removal of unwanted artifacts to image_prep. (kwoodson@redhat.com)
+- Ensure journald persistence directories exist (mgugino@redhat.com)
+- Fix lint (tbielawa@redhat.com)
+- Move add_many_container_providers.yml to playbooks/byo/openshift-management
+ with a noop task include to load filter plugins. (abutcher@redhat.com)
+- Refactor adding multiple container providers (tbielawa@redhat.com)
+- Management Cleanup and Provider Integration (tbielawa@redhat.com)
+
+* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.181.0
+- Fix loop_var warnings during logging install (mgugino@redhat.com)
+- Fix typo and add detailed comments in kuryr (sngchlko@gmail.com)
+
+* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.179.0
+- Remove pause from master service startup (rteague@redhat.com)
+- Change default in prometheus storage type to emptydir (zgalor@redhat.com)
+- Bug 1491636 - honor node selectors (jwozniak@redhat.com)
+- Sync latest imagestreams and templates (sdodson@redhat.com)
+- Remove base package install (mgugino@redhat.com)
+- etcd: remove hacks for the system container (gscrivan@redhat.com)
+- Ensure deployment_subtype is set within openshift_sanitize_inventory.
+ (abutcher@redhat.com)
+- Add installer checkpoint for prometheus (zgalor@redhat.com)
+- Remove unused registry_volume_claim variable (hansmi@vshn.ch)
+
* Wed Oct 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.178.0
- Split prometheus image defaults to prefix and version (zgalor@redhat.com)
- Remove extraneous spaces that yamllint dislikes (staebler@redhat.com)
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index fbab61189..4e5c1017b 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -66,7 +66,7 @@ openshift_pkg_version: # example: -3.7.0
openshift_aws_ssh_key_name: # example: myuser_key
openshift_aws_base_ami: # example: ami-12345678
openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt'
-openshift_aws_iam_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key'
+openshift_aws_iam_cert_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key'
```
If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`.
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
index aa91363ae..1491fb868 100644
--- a/playbooks/aws/provisioning_vars.yml.example
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -116,5 +116,5 @@ openshift_aws_base_ami: # ami-12345678
# custom certificates are required for the ELB
openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt'
-openshift_aws_iam_key_path: # '/path/to/wildcard.<clusterid>.example.com.key'
-#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
+openshift_aws_iam_cert_key_path: # '/path/to/wildcard.<clusterid>.example.com.key'
+openshift_aws_iam_cert_chain_path: # '/path/to/cert.ca.crt'
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 60fa44c5b..f2e52782b 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -8,5 +8,3 @@
- always
- include: ../../common/openshift-cluster/config.yml
- vars:
- openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-management/add_container_provider.yml b/playbooks/byo/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..3378b5abd
--- /dev/null
+++ b/playbooks/byo/openshift-management/add_container_provider.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- include: ../../common/openshift-management/add_container_provider.yml
diff --git a/playbooks/byo/openshift-management/add_many_container_providers.yml b/playbooks/byo/openshift-management/add_many_container_providers.yml
new file mode 100644
index 000000000..62fdb11c5
--- /dev/null
+++ b/playbooks/byo/openshift-management/add_many_container_providers.yml
@@ -0,0 +1,36 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Ensure the container provider configuration is defined
+ assert:
+ that: container_providers_config is defined
+ msg: |
+ Error: Must provide providers config path. Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command
+
+ - name: Include providers/management configuration
+ include_vars:
+ file: "{{ container_providers_config }}"
+
+ - name: Ensure this cluster is a container provider
+ uri:
+ url: "https://{{ management_server['hostname'] }}/api/providers"
+ body_format: json
+ method: POST
+ user: "{{ management_server['user'] }}"
+ password: "{{ management_server['password'] }}"
+ validate_certs: no
+ # Docs on formatting the BODY of the POST request:
+ # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+ body: "{{ item }}"
+ failed_when: false
+ with_items: "{{ container_providers }}"
+ register: results
+
+ # Include openshift_management for access to filter_plugins.
+ - include_role:
+ name: openshift_management
+ tasks_from: noop
+
+ - name: print each result
+ debug:
+ msg: "{{ results.results | oo_filter_container_providers }}"
diff --git a/playbooks/byo/openshift-management/roles b/playbooks/byo/openshift-management/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-management/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml
index a1fb1cdc4..e95c1c88a 100644
--- a/playbooks/byo/openshift-management/uninstall.yml
+++ b/playbooks/byo/openshift-management/uninstall.yml
@@ -1,4 +1,2 @@
---
-# - include: ../openshift-cluster/initialize_groups.yml
-
- include: ../../common/openshift-management/uninstall.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index 30e719d8f..bda245fe1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -112,6 +112,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_5/master_config_upgrade.yml"
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 920dc2ffc..dd109cfa9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -116,6 +116,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 81f6dc8a4..f4862e321 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -119,9 +119,9 @@
tasks:
- include: ../cleanup_unused_images.yml
-#TODO: Why doesn't this compose using ./upgrade_control_plane rather than
-# ../upgrade_control_plane?
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..facb3a5b9
--- /dev/null
+++ b/playbooks/common/openshift-management/add_container_provider.yml
@@ -0,0 +1,8 @@
+---
+- name: Add Container Provider to Management
+ hosts: oo_first_master
+ tasks:
+ - name: Run the Management Integration Tasks
+ include_role:
+ name: openshift_management
+ tasks_from: add_container_provider
diff --git a/playbooks/common/openshift-management/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml
index 698d93405..9f35cc276 100644
--- a/playbooks/common/openshift-management/uninstall.yml
+++ b/playbooks/common/openshift-management/uninstall.yml
@@ -1,6 +1,6 @@
---
- name: Uninstall CFME
- hosts: masters
+ hosts: masters[0]
tasks:
- name: Run the CFME Uninstall Role Tasks
include_role:
diff --git a/playbooks/common/openshift-node/clean_image.yml b/playbooks/common/openshift-node/clean_image.yml
new file mode 100644
index 000000000..38753d0af
--- /dev/null
+++ b/playbooks/common/openshift-node/clean_image.yml
@@ -0,0 +1,10 @@
+---
+- name: Configure nodes
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ tasks:
+ - name: Remove any ansible facts created during AMI creation
+ file:
+ path: "/etc/ansible/facts.d/{{ item }}"
+ state: absent
+ with_items:
+ - openshift.fact
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
index 00d167c22..30651a1df 100644
--- a/playbooks/common/openshift-node/image_prep.yml
+++ b/playbooks/common/openshift-node/image_prep.yml
@@ -19,3 +19,6 @@
- name: Re-enable excluders
include: enable_excluders.yml
+
+- name: Remove any undesired artifacts from build
+ include: clean_image.yml
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 78f231416..4b734d4ed 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -13,8 +13,6 @@ r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'h
# etcd server vars
etcd_conf_dir: '/etc/etcd'
-r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
-etcd_system_container_conf_dir: /var/lib/etcd/etc
etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
@@ -54,7 +52,7 @@ etcd_is_containerized: False
etcd_is_thirdparty: False
# etcd dir vars
-etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
# etcd ports and protocols
etcd_client_port: 2379
diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml
index 24bd0540d..d2e866416 100644
--- a/roles/etcd/tasks/backup.force_new_cluster.yml
+++ b/roles/etcd/tasks/backup.force_new_cluster.yml
@@ -3,10 +3,10 @@
- name: Move content of etcd backup under the etcd data directory
command: >
- mv "{{ l_etcd_backup_dir }}/member" "{{ l_etcd_data_dir }}"
+ mv "{{ l_etcd_backup_dir }}/member" "{{ etcd_data_dir }}"
- name: Set etcd group for the etcd data directory
command: >
- chown -R etcd:etcd "{{ l_etcd_data_dir }}"
+ chown -R etcd:etcd "{{ etcd_data_dir }}"
- include: auxiliary/force_new_cluster.yml
diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml
index ec1a1989c..ca0d29155 100644
--- a/roles/etcd/tasks/backup/backup.yml
+++ b/roles/etcd/tasks/backup/backup.yml
@@ -3,7 +3,7 @@
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ l_etcd_data_dir }} | tail -n 1
+ shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1
register: l_avail_disk
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
@@ -11,7 +11,7 @@
# TODO: replace shell module with command and update later checks
- name: Check current etcd disk usage
- shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1
+ shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1
register: l_etcd_disk_usage
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
@@ -44,17 +44,17 @@
- r_etcd_common_embedded_etcd | bool
- not l_ostree_booted.stat.exists | bool
-- name: Check selinux label of '{{ l_etcd_data_dir }}'
+- name: Check selinux label of '{{ etcd_data_dir }}'
command: >
- stat -c '%C' {{ l_etcd_data_dir }}
+ stat -c '%C' {{ etcd_data_dir }}
register: l_etcd_selinux_labels
- debug:
msg: "{{ l_etcd_selinux_labels }}"
-- name: Make sure the '{{ l_etcd_data_dir }}' has the proper label
+- name: Make sure the '{{ etcd_data_dir }}' has the proper label
command: >
- chcon -t svirt_sandbox_file_t "{{ l_etcd_data_dir }}"
+ chcon -t svirt_sandbox_file_t "{{ etcd_data_dir }}"
when:
- l_etcd_selinux_labels.rc == 0
- "'svirt_sandbox_file_t' not in l_etcd_selinux_labels.stdout"
@@ -68,12 +68,12 @@
# https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6
- name: Check for v3 data store
stat:
- path: "{{ l_etcd_data_dir }}/member/snap/db"
+ path: "{{ etcd_data_dir }}/member/snap/db"
register: l_v3_db
- name: Copy etcd v3 data store
command: >
- cp -a {{ l_etcd_data_dir }}/member/snap/db
+ cp -a {{ etcd_data_dir }}/member/snap/db
{{ l_etcd_backup_dir }}/member/snap/
when: l_v3_db.stat.exists
diff --git a/roles/etcd/tasks/backup/copy.yml b/roles/etcd/tasks/backup/copy.yml
index 16604bae8..967e5ee66 100644
--- a/roles/etcd/tasks/backup/copy.yml
+++ b/roles/etcd/tasks/backup/copy.yml
@@ -2,4 +2,4 @@
- name: Copy etcd backup
copy:
src: "{{ etcd_backup_sync_directory }}/{{ l_backup_dir_name }}.tgz"
- dest: "{{ l_etcd_data_dir }}"
+ dest: "{{ etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/unarchive.yml b/roles/etcd/tasks/backup/unarchive.yml
index 6c75d00a7..a85f533c2 100644
--- a/roles/etcd/tasks/backup/unarchive.yml
+++ b/roles/etcd/tasks/backup/unarchive.yml
@@ -11,4 +11,4 @@
# src: "{{ l_etcd_backup_dir }}.tgz"
# dest: "{{ l_etcd_backup_dir }}"
command: >
- tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ l_etcd_data_dir }}"
+ tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/vars.yml b/roles/etcd/tasks/backup/vars.yml
index 3c009f557..3ffa641b3 100644
--- a/roles/etcd/tasks/backup/vars.yml
+++ b/roles/etcd/tasks/backup/vars.yml
@@ -6,13 +6,10 @@
l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
- set_fact:
- l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}"
-
-- set_fact:
l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
- set_fact:
l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
- set_fact:
- l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}"
+ l_etcd_backup_dir: "{{ etcd_data_dir }}/{{ l_backup_dir_name }}"
diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
index 26492fb3c..00b8f4a0b 100644
--- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
+++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
@@ -12,9 +12,6 @@
- "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
- "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
- "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
- - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
- - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
- - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
register: g_etcd_server_cert_stat_result
when: not etcd_certificates_redeploy | default(false) | bool
@@ -141,7 +138,6 @@
state: directory
with_items:
- "{{ etcd_cert_config_dir }}"
- - "{{ etcd_system_container_cert_config_dir }}"
when: etcd_server_certs_missing | bool
- name: Unarchive cert tarball
@@ -176,25 +172,8 @@
state: directory
with_items:
- "{{ etcd_ca_dir }}"
- - "{{ etcd_system_container_cert_config_dir }}/ca"
when: etcd_server_certs_missing | bool
-- name: Unarchive cert tarball for the system container
- unarchive:
- src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ etcd_system_container_cert_config_dir }}"
- when:
- - etcd_server_certs_missing | bool
- - r_etcd_common_etcd_runtime == 'runc'
-
-- name: Unarchive etcd ca cert tarballs for the system container
- unarchive:
- src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_system_container_cert_config_dir }}/ca"
- when:
- - etcd_server_certs_missing | bool
- - r_etcd_common_etcd_runtime == 'runc'
-
- name: Delete temporary directory
local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent
become: no
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 9a6951920..f71d9b551 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,7 +1,4 @@
---
-- set_fact:
- l_etcd_src_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
-
- name: Ensure proxies are in the atomic.conf
include_role:
name: openshift_atomic
@@ -57,36 +54,13 @@
- name: Systemd reload configuration
systemd: name=etcd_container daemon_reload=yes
-- name: Check for previous etcd data store
- stat:
- path: "{{ l_etcd_src_data_dir }}/member/"
- register: src_datastore
-
-- name: Check for etcd system container data store
- stat:
- path: "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member"
- register: dest_datastore
-
-- name: Ensure that etcd system container data dirs exist
- file: path="{{ item }}" state=directory
- with_items:
- - "{{ r_etcd_common_system_container_host_dir }}/etc"
- - "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd"
-
-- name: Copy etcd data store
- command: >
- cp -a {{ l_etcd_src_data_dir }}/member
- {{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member
- when:
- - src_datastore.stat.exists
- - not dest_datastore.stat.exists
-
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
image: "{{ openshift.etcd.etcd_image }}"
state: latest
values:
+ - ETCD_DATA_DIR=/var/lib/etcd
- ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
- ETCD_NAME={{ etcd_hostname }}
- ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }}
@@ -95,11 +69,21 @@
- ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
- ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
- ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
- - ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- - ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt
- - ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key
- - ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- - ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
- - ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
- - ETCD_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
+ - ETCD_CA_FILE={{ etcd_ca_file }}
+ - ETCD_CERT_FILE={{ etcd_cert_file }}
+ - ETCD_KEY_FILE={{ etcd_key_file }}
+ - ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }}
+ - ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }}
+ - ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
+ - ETCD_TRUSTED_CA_FILE={{ etcd_ca_file }}
+ - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_peer_ca_file }}
+ - 'ADDTL_MOUNTS=,{"type":"bind","source":"/etc/","destination":"/etc/","options":["rbind","rw","rslave"]},{"type":"bind","source":"/var/lib/etcd","destination":"/var/lib/etcd/","options":["rbind","rw","rslave"]}'
+
+- name: Ensure etcd datadir ownership for the system container
+ file:
+ path: "{{ etcd_data_dir }}"
+ state: directory
+ mode: 0700
+ owner: root
+ group: root
+ recurse: True
diff --git a/roles/kuryr/defaults/main.yaml b/roles/kuryr/defaults/main.yaml
index ff298dda0..af05d80df 100644
--- a/roles/kuryr/defaults/main.yaml
+++ b/roles/kuryr/defaults/main.yaml
@@ -5,10 +5,10 @@ kuryr_config_dir: /etc/kuryr
# Kuryr username
kuryr_openstack_username: kuryr
-# Kuryr username domain
+# Kuryr domain name or id containing user
kuryr_openstack_user_domain_name: default
-# Kuryr username domain
+# Kuryr domain name or id containing project
kuryr_openstack_project_domain_name: default
# Kuryr OpenShift namespace
@@ -31,7 +31,7 @@ cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/
cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tgz"
cni_bin_checksum: "71f411080245aa14d0cc06f6824e8039607dd9e9"
-# Kuryr ClusterRole definiton
+# Kuryr ClusterRole definition
kuryr_clusterrole:
name: kuryrctl
state: present
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
index d319fdd1a..0cb749dcc 100644
--- a/roles/openshift_aws/tasks/seal_ami.yml
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -1,11 +1,4 @@
---
-- name: Remove any ansible facts created during AMI creation
- file:
- path: "/etc/ansible/facts.d/{{ item }}"
- state: absent
- with_items:
- - openshift.fact
-
- name: fetch newly created instances
ec2_remote_facts:
region: "{{ openshift_aws_region }}"
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
index b3ecd57a6..0c072b64a 100644
--- a/roles/openshift_etcd_facts/vars/main.yml
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -6,6 +6,5 @@ etcd_ip: "{{ openshift.common.ip }}"
etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"
etcd_cert_prefix:
etcd_cert_config_dir: "/etc/etcd"
-etcd_system_container_cert_config_dir: /var/lib/etcd/etcd.etcd/etc
etcd_peer_url_scheme: https
etcd_url_scheme: https
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 76627acf2..668a3f7e7 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -69,21 +69,23 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
- openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
- _es_containers: "{{item.0.containers}}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
+ openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
+ _es_containers: "{{ outer_item.0.containers}}"
_es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
- "{{ openshift_logging_facts.elasticsearch.pvcs }}"
- "{{ es_indices }}"
+ loop_control:
+ loop_var: outer_item
when:
- openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0
@@ -93,13 +95,15 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ outer_item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
+ loop_control:
+ loop_var: outer_item
- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}
with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
@@ -123,8 +127,8 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_deployment_name: "{{ item.0.name }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
@@ -135,8 +139,8 @@
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
- openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
- openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
+ openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -145,13 +149,16 @@
openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
openshift_logging_es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards | default(None) }}"
openshift_logging_es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas | default(None) }}"
- _es_containers: "{{item.0.containers}}"
+ _es_containers: "{{ outer_item.0.containers}}"
_es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
- "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
- "{{ es_ops_indices }}"
+ loop_control:
+ loop_var: outer_item
+
when:
- openshift_logging_use_ops | bool
- openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0
@@ -162,7 +169,7 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ outer_item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
@@ -182,6 +189,8 @@
openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
+ loop_control:
+ loop_var: outer_item
when:
- openshift_logging_use_ops | bool
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
index 3a71d9211..05ca27913 100644
--- a/roles/openshift_management/README.md
+++ b/roles/openshift_management/README.md
@@ -38,6 +38,10 @@ deployment type (`openshift_deployment_type`):
* [Cloud Provider](#cloud-provider)
* [Preconfigured (Expert Configuration Only)](#preconfigured-expert-configuration-only)
* [Customization](#customization)
+ * [Container Provider](#container-provider)
+ * [Manually](#manually)
+ * [Automatically](#automatically)
+ * [Multiple Providers](#multiple-providers)
* [Uninstall](#uninstall)
* [Additional Information](#additional-information)
@@ -80,30 +84,10 @@ to there being no databases that require pods.
*Be extra careful* if you are overriding template
parameters. Including parameters not defined in a template **will
-cause errors**.
-
-**Container Provider Integration** - If you want add your container
-platform (OCP/Origin) as a *Container Provider* in CFME/MIQ then you
-must ensure that the infrastructure management hooks are installed.
-
-* During your OCP/Origin install, ensure that you have the
- `openshift_use_manageiq` parameter set to `true` in your inventory
- at install time. This will create a `management-infra` project and a
- service account user.
-* After CFME/MIQ is installed, obtain the `management-admin` service
- account token and copy it somewhere safe.
-
-```bash
-$ oc serviceaccounts get-token -n management-infra management-admin
-eyJhuGdiOiJSUzI1NiIsInR5dCI6IkpXVCJ9.eyJpd9MiOiJrbWJldm5lbGVzL9NldnZpY2VhY2NvbW50Iiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9uYW1ld9BhY2UiOiJtYW5hZ2VtZW50LWluZnJhIiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9zZWNyZXQuumFtZSI6Im1humFnZW1lunQtYWRtyW4tbG9rZW4tdDBnOTAiLCJrbWJldm5lbGVzLmlvL9NldnZpY2VhY2NvbW50L9NldnZpY2UtYWNju9VubC5uYW1lIjoiuWFuYWbluWVubC1hZG1puiIsImt1YmVyumV0ZXMuyW8vd2VybmljZWFjY291unQvd2VybmljZS1hY2NvbW50LnVpZCI6IjRiZDM2MWQ1LWE1NDAtMTFlNy04YzI5LTUyNTQwMDliMmNkZCIsInN1YiI6InN5d9RluTpzZXJ2yWNlYWNju9VubDptYW5hZ2VtZW50LWluZnJhOm1humFnZW1lunQtYWRtyW4ifQ.B6sZLGD9O4vBu9MHwiG-C_4iEwjBXb7Af8BPw-LNlujDmHhOnQ-Oo4QxQKyj9edynfmDy2yutUyJ2Mm9HfDGWg4C9xhWImHoq6Nl7T5_9djkeGKkK7Ejvg4fA-IkrzEsZeQuluBvXnE6wvP0LCjUo_dx4pPyZJyp46teV9NqKQeDzeysjlMCyqp6AK6-Lj8ILG8YA6d_97HlzL_EgFBLAu0lBSn-uC_9J0gLysqBtK6TI0nExfhv9Bm1_5bdHEbKHPW7xIlYlI9AgmyTyhsQ6SoQWtL2khBjkG9TlPBq9wYJj9bzqgVZlqEfICZxgtXO7sYyuoje4y8lo0YQ0kZmig
-```
-
-* In the CFME/MIQ web interface, navigate to `Compute` →
- `Containers` → `Providers` and select `⚙ Configuration` → `⊕
- Add a new Containers Provider`
-
-*See the [upstream documentation](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/index.html#containers-providers) for additional information.*
-
+cause errors**. If you do receive an error during the `Ensure the CFME
+App is created` task, we recommend running the
+[uninstall scripts](#uninstall) first before running the installer
+again.
# Requirements
@@ -140,11 +124,13 @@ used in your Ansible inventory to control the behavior of this
installer.
-| Variable | Required | Default | Description |
-|------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
-| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. |
+| Variable | Required | Default | Description |
+|------------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
+| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. |
| `openshift_management_project_description` | **No** | *CloudForms Management Engine* | Namespace/project description. |
-| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application |
+| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application |
+| `openshift_management_username`                       | **No**   | `admin`                        | Default management username. Changing this value **does not change the username**. Only change this value if you have changed the name already and are running integration scripts (such as the [add container provider](#container-provider) script) |
+| `openshift_management_password`                       | **No**   | `smartvm`                      | Default management password. Changing this value **does not change the password**. Only change this value if you have changed the password already and are running integration scripts (such as the [add-container-provider](#container-provider) script) |
| **PRODUCT CHOICE** | | | | |
| `openshift_management_app_template` | **No** | `miq-template` | The project flavor to install. Choices: <ul><li>`miq-template`: ManageIQ using a podified database</li> <li> `miq-template-ext-db`: ManageIQ using an external database</li> <li>`cfme-template`: CloudForms using a podified database<sup>[1]</sup></li> <li> `cfme-template-ext-db`: CloudForms using an external database.<sup>[1]</sup></li></ul> |
| **STORAGE CLASSES** | | | | |
@@ -268,6 +254,9 @@ openshift_management_app_template=cfme-template-ext-db
openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'}
```
+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
# Limitations
This release is the first OpenShift CFME release in the OCP 3.7
@@ -318,6 +307,9 @@ inventory. The following keys are required:
* `DATABASE_PORT` - *note: Most PostgreSQL servers run on port `5432`*
* `DATABASE_NAME`
+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
Your inventory would contain a line similar to this:
```ini
@@ -453,6 +445,116 @@ hash. This applies to **CloudForms** installations as well:
[cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml),
[cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml).
+# Container Provider
+
+There are two methods for enabling container provider integration. You
+can manually add OCP/Origin as a container provider, or you can try
+the playbooks included with this role.
+
+## Manually
+
+See the online documentation for steps to manually add your cluster as
+a container provider:
+
+* [Container Providers](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/#containers-providers)
+
+## Automatically
+
+Automated container provider integration can be accomplished using the
+playbooks included with this role.
+
+This playbook will:
+
+1. Gather the necessary authentication secrets
+1. Find the public routes to the Management app and the cluster API
+1. Make a REST call to add this cluster as a container provider
+
+
+```
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml
+```
+
+## Multiple Providers
+
+As well as providing playbooks to integrate your *current* container
+platform into the management service, this role includes a **tech
+preview** script which allows you to add multiple container platforms
+as container providers in any arbitrary MIQ/CFME server.
+
+Using the multiple-provider script requires manual configuration and
+setting an `EXTRA_VARS` parameter on the command-line.
+
+
+1. Copy the
+ [container_providers.yml](files/examples/container_providers.yml)
+ example somewhere, such as `/tmp/cp.yml`
+1. If you changed your CFME/MIQ name or password, update the
+ `hostname`, `user`, and `password` parameters in the
+ `management_server` key in the `container_providers.yml` file copy
+1. Fill in an entry under the `container_providers` key for *each* OCP
+ or Origin cluster you want to add as container providers
+
+**Parameters Which MUST Be Configured:**
+
+* `auth_key` - This is the token of a service account which has admin capabilities on the cluster.
+* `hostname` - This is the hostname that points to the cluster API. Each container provider must have a unique hostname.
+* `name` - This is the name of the cluster as displayed in the management server container providers overview. This must be unique.
+
+*Note*: You can obtain the `auth_key` bearer token from your clusters
+ with this command: `oc serviceaccounts get-token -n management-infra
+ management-admin`
+
+**Parameters Which MAY Be Configured:**
+
+* `port` - Update this key if your OCP/Origin cluster runs the API on a port other than `8443`
+* `endpoint` - You may enable SSL verification (`verify_ssl`) or change the validation setting to `ssl-with-validation`. Support for custom trusted CA certificates is not available at this time.
+
+
+Let's see an example describing the following scenario:
+
+* You copied `files/examples/container_providers.yml` to `/tmp/cp.yml`
+* You're adding two OCP clusters
+* Your management server runs on `mgmt.example.com`
+
+You would customize `/tmp/cp.yml` as such:
+
+```yaml
+---
+container_providers:
+ - connection_configurations:
+ - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "ocp-prod.example.com"
+ name: OCP Production
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+ - connection_configurations:
+ - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "ocp-test.example.com"
+ name: OCP Testing
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+management_server:
+ hostname: "mgmt.example.com"
+ user: admin
+ password: b3tt3r_p4SSw0rd
+```
+
+Then you will run the many-container-providers integration script. You
+**must** provide the path to the container providers configuration
+file as an `EXTRA_VARS` parameter to `ansible-playbook`. Use the `-e`
+(or `--extra-vars`) parameter to set `container_providers_config` to
+the config file path.
+
+```
+$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \
+ playbooks/byo/openshift-management/add_many_container_providers.yml
+```
+
+Afterwards you will find two new container providers in your
+management service. Navigate to `Compute` → `Containers` → `Providers`
+to see an overview.
# Uninstall
@@ -461,6 +563,11 @@ installation:
* `playbooks/byo/openshift-management/uninstall.yml`
+NFS export definitions and data stored on NFS exports are not
+automatically removed. You are urged to manually erase any data from
+old application or database deployments before attempting to
+initialize a new deployment.
+
# Additional Information
The upstream project,
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
index ebb56313f..8ba65b386 100644
--- a/roles/openshift_management/defaults/main.yml
+++ b/roles/openshift_management/defaults/main.yml
@@ -77,6 +77,20 @@ openshift_management_storage_nfs_base_dir: /exports
openshift_management_storage_nfs_local_hostname: false
######################################################################
+# DEFAULT ACCOUNT INFORMATION
+######################################################################
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change your username or password. You should only need to change
+# these values in your inventory if you already changed the actual
+# name and password AND are trying to use integration scripts.
+#
+# For example, adding this cluster as a container provider,
+# playbooks/byo/openshift-management/add_container_provider.yml
+openshift_management_username: admin
+openshift_management_password: smartvm
+
+######################################################################
# SCAFFOLDING - These are parameters we pre-seed that a user may or
# may not set later
######################################################################
diff --git a/roles/openshift_management/files/examples/container_providers.yml b/roles/openshift_management/files/examples/container_providers.yml
new file mode 100644
index 000000000..661f62e4d
--- /dev/null
+++ b/roles/openshift_management/files/examples/container_providers.yml
@@ -0,0 +1,22 @@
+---
+container_providers:
+ - connection_configurations:
+ - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "OCP/Origin cluster hostname (providing API access)"
+ name: openshift-management
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+# Copy and update for as many OCP or Origin providers as you want to
+# add to your management service
+ # - connection_configurations:
+ # - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+ # endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ # hostname: "OCP/Origin cluster hostname (providing API access)"
+ # name: openshift-management
+ # port: 8443
+ # type: "ManageIQ::Providers::Openshift::ContainerManager"
+management_server:
+ hostname: "Management server hostname (providing API access)"
+ user: admin
+ password: smartvm
diff --git a/roles/openshift_management/filter_plugins/oo_management_filters.py b/roles/openshift_management/filter_plugins/oo_management_filters.py
new file mode 100644
index 000000000..3b7013d9a
--- /dev/null
+++ b/roles/openshift_management/filter_plugins/oo_management_filters.py
@@ -0,0 +1,32 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""
+Filter methods for the management role
+"""
+
+
+def oo_filter_container_providers(results):
+ """results - the result from posting the API calls for adding new
+providers"""
+ all_results = []
+ for result in results:
+ if 'results' in result['json']:
+ # We got an OK response
+ res = result['json']['results'][0]
+ all_results.append("Provider '{}' - Added successfully".format(res['name']))
+ elif 'error' in result['json']:
+ # This was a problem
+ all_results.append("Provider '{}' - Failed to add. Message: {}".format(
+ result['item']['name'], result['json']['error']['message']))
+ return all_results
+
+
+class FilterModule(object):
+ """ Custom ansible filter mapping """
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ """ returns a mapping of filters to methods """
+ return {
+ "oo_filter_container_providers": oo_filter_container_providers,
+ }
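Again purely for illustration (not part of the patch), a sketch of the input shape `oo_filter_container_providers` expects — the registered results of the `uri` POSTs made in `add_many_container_providers.yml` — and the summary strings it emits. The import path, provider names, and error message are placeholders:

```python
# Hypothetical input/output for the new filter (illustration only).
from oo_management_filters import oo_filter_container_providers  # assumed import path

sample_results = [
    # A provider that was added successfully: the API reply carries 'results'.
    {"item": {"name": "OCP Production"},
     "json": {"results": [{"name": "OCP Production"}]}},
    # A provider that failed to add: the API reply carries 'error' instead.
    {"item": {"name": "OCP Testing"},
     "json": {"error": {"message": "Host has already been taken"}}},
]

for line in oo_filter_container_providers(sample_results):
    print(line)
# Provider 'OCP Production' - Added successfully
# Provider 'OCP Testing' - Failed to add. Message: Host has already been taken
```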
diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml
new file mode 100644
index 000000000..383e6edb5
--- /dev/null
+++ b/roles/openshift_management/tasks/add_container_provider.yml
@@ -0,0 +1,65 @@
+---
+- name: Ensure lib_openshift modules are available
+ include_role:
+    name: lib_openshift
+
+- name: Ensure OpenShift facts module is available
+ include_role:
+    name: openshift_facts
+
+- name: Ensure OpenShift facts are loaded
+ openshift_facts:
+
+- name: Ensure the management SA Secrets are read
+ oc_serviceaccount_secret:
+ state: list
+ service_account: management-admin
+ namespace: management-infra
+ register: sa
+
+- name: Ensure the management SA bearer token is identified
+ set_fact:
+ management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+- name: Ensure the SA bearer token value is read
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: management-infra
+ decode: true
+ no_log: True
+ register: sa_secret
+
+- name: Ensure the SA bearer token value is saved
+ set_fact:
+ management_bearer_token: "{{ sa_secret.results.decoded.token }}"
+
+- name: Ensure we have the public route to the management service
+ oc_route:
+ state: list
+ name: httpd
+ namespace: openshift-management
+ register: route
+
+- name: Ensure the management service route is saved
+ set_fact:
+ management_route: "{{ route.results.0.spec.host }}"
+
+- name: Ensure this cluster is a container provider
+ uri:
+ url: "https://{{ management_route }}/api/providers"
+ body_format: json
+ method: POST
+ user: "{{ openshift_management_username }}"
+ password: "{{ openshift_management_password }}"
+ validate_certs: no
+ # Docs on formatting the BODY of the POST request:
+ # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+ body:
+ connection_configurations:
+ - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "{{ openshift.master.cluster_public_hostname }}"
+ name: "{{ openshift_management_project }}"
+ port: "{{ openshift.master.api_port }}"
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
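The final task above is just a JSON POST to the management API. As a rough, hedged equivalent outside Ansible (the management hostname, cluster hostname, and bearer token are placeholders; the endpoint path, body layout, and the `admin`/`smartvm` defaults come from this patch):

```python
# Hypothetical standalone equivalent of the "Ensure this cluster is a
# container provider" task, using the requests library (illustration only).
import requests

body = {
    "connection_configurations": [{
        "authentication": {"auth_key": "<SA bearer token>", "authtype": "bearer", "type": "AuthToken"},
        "endpoint": {"role": "default", "security_protocol": "ssl-without-validation", "verify_ssl": 0},
    }],
    "hostname": "openshift.example.com",       # cluster public hostname
    "name": "openshift-management",            # provider name shown in the UI
    "port": 8443,                              # cluster API port
    "type": "ManageIQ::Providers::Openshift::ContainerManager",
}

resp = requests.post(
    "https://mgmt.example.com/api/providers",  # management route + /api/providers
    json=body,
    auth=("admin", "smartvm"),                 # openshift_management_username/password defaults
    verify=False,                              # mirrors validate_certs: no
)
print(resp.status_code, resp.json())
```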
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
index 86c4d0010..9be923a57 100644
--- a/roles/openshift_management/tasks/main.yml
+++ b/roles/openshift_management/tasks/main.yml
@@ -2,23 +2,33 @@
######################################################################)
# Users, projects, and privileges
-- name: Run pre-install CFME validation checks
+- name: Run pre-install Management validation checks
include: validate.yml
-- name: "Ensure the CFME '{{ openshift_management_project }}' namespace exists"
+# This creates a service account allowing Container Provider
+# integration (managing OCP/Origin via MIQ/Management)
+- name: Enable Container Provider Integration
+ include_role:
+    name: openshift_manageiq
+
+- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists"
oc_project:
state: present
name: "{{ openshift_management_project }}"
display_name: "{{ openshift_management_project_description }}"
-- name: Create and Authorize CFME Accounts
+- name: Create and Authorize Management Accounts
include: accounts.yml
######################################################################
# STORAGE - Initialize basic storage class
+- name: Determine the correct NFS host if required
+ include: storage/nfs_server.yml
+ when: openshift_management_storage_class in ['nfs', 'nfs_external']
+
#---------------------------------------------------------------------
# * nfs - set up NFS shares on the first master for a proof of concept
-- name: Create required NFS exports for CFME app storage
+- name: Create required NFS exports for Management app storage
include: storage/nfs.yml
when: openshift_management_storage_class == 'nfs'
@@ -45,7 +55,7 @@
######################################################################
# APPLICATION TEMPLATE
-- name: Install the CFME app and PV templates
+- name: Install the Management app and PV templates
include: template.yml
######################################################################
@@ -71,9 +81,16 @@
when:
- openshift_management_app_template in ['miq-template', 'cfme-template']
-- name: Ensure the CFME App is created
+- name: Ensure the Management App is created
oc_process:
namespace: "{{ openshift_management_project }}"
template_name: "{{ openshift_management_template_name }}"
create: True
params: "{{ openshift_management_template_parameters }}"
+
+- name: Wait for the Management app to come up (30s check interval, 10 minute maximum)
+ command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}"
+ register: app_seeding_logs
+ until: app_seeding_logs.stdout.find('Server starting complete') != -1
+ delay: 30
+ retries: 20
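+# Note: 20 retries with a 30 second delay gives the pod roughly 10 minutes to
+# finish seeding; the task polls the {{ openshift_management_flavor }}-0 pod's
+# logs until the 'Server starting complete' marker appears.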
diff --git a/roles/openshift_management/tasks/noop.yml b/roles/openshift_management/tasks/noop.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_management/tasks/noop.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
index 31c845725..d1b9a8d5c 100644
--- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
+++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
@@ -26,7 +26,7 @@
when:
- openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined
-- name: Check if the CFME App PV has been created
+- name: Check if the Management App PV has been created
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -34,7 +34,7 @@
name: "{{ openshift_management_flavor_short }}-app"
register: miq_app_pv_check
-- name: Check if the CFME DB PV has been created
+- name: Check if the Management DB PV has been created
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -44,7 +44,7 @@
when:
- openshift_management_app_template in ['miq-template', 'cfme-template']
-- name: Ensure the CFME App PV is created
+- name: Ensure the Management App PV is created
oc_process:
namespace: "{{ openshift_management_project }}"
template_name: "{{ openshift_management_flavor }}-app-pv"
@@ -55,7 +55,7 @@
NFS_HOST: "{{ openshift_management_nfs_server }}"
when: miq_app_pv_check.results.results == [{}]
-- name: Ensure the CFME DB PV is created
+- name: Ensure the Management DB PV is created
oc_process:
namespace: "{{ openshift_management_project }}"
template_name: "{{ openshift_management_flavor }}-db-pv"
diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml
index 696808328..94e11137c 100644
--- a/roles/openshift_management/tasks/storage/nfs.yml
+++ b/roles/openshift_management/tasks/storage/nfs.yml
@@ -2,37 +2,6 @@
# Tasks to statically provision NFS volumes
# Include if not using dynamic volume provisioning
-- name: Ensure we save the local NFS server if one is provided
- set_fact:
- openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}"
- when:
- - openshift_management_storage_nfs_local_hostname is defined
- - openshift_management_storage_nfs_local_hostname != False
- - openshift_management_storage_class == "nfs"
-
-- name: Ensure we save the local NFS server
- set_fact:
- openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}"
- when:
- - openshift_management_nfs_server is not defined
- - openshift_management_storage_class == "nfs"
-
-- name: Ensure we save the external NFS server
- set_fact:
- openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}"
- when:
- - openshift_management_storage_class == "nfs_external"
-
-- name: Failed NFS server detection
- assert:
- that:
- - openshift_management_nfs_server is defined
- msg: |
- "Unable to detect an NFS server. The 'nfs_external'
- openshift_management_storage_class option requires that you set
- openshift_management_storage_nfs_external_hostname. NFS hosts detected
- for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}"
-
- name: Setting up NFS storage
block:
- name: Include the NFS Setup role tasks
diff --git a/roles/openshift_management/tasks/storage/nfs_server.yml b/roles/openshift_management/tasks/storage/nfs_server.yml
new file mode 100644
index 000000000..96a742c83
--- /dev/null
+++ b/roles/openshift_management/tasks/storage/nfs_server.yml
@@ -0,0 +1,31 @@
+---
+- name: Ensure we save the local NFS server if one is provided
+ set_fact:
+ openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}"
+ when:
+ - openshift_management_storage_nfs_local_hostname is defined
+ - openshift_management_storage_nfs_local_hostname != False
+ - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the local NFS server
+ set_fact:
+ openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}"
+ when:
+ - openshift_management_nfs_server is not defined
+ - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the external NFS server
+ set_fact:
+ openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}"
+ when:
+ - openshift_management_storage_class == "nfs_external"
+
+- name: Failed NFS server detection
+ assert:
+ that:
+ - openshift_management_nfs_server is defined
+ msg: |
+ "Unable to detect an NFS server. The 'nfs_external'
+ openshift_management_storage_class option requires that you set
+ openshift_management_storage_nfs_external_hostname. NFS hosts detected
+ for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}"
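+# A minimal inventory sketch for the two storage classes handled above
+# (hostnames are placeholders):
+#   openshift_management_storage_class=nfs
+#   openshift_management_storage_nfs_local_hostname=nfs.example.com  # optional; otherwise the first oo_nfs_to_config host is used
+# or, to point at an NFS server outside the cluster:
+#   openshift_management_storage_class=nfs_external
+#   openshift_management_storage_nfs_external_hostname=nfs.example.com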
diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml
index 299158ac4..9f97cdcb9 100644
--- a/roles/openshift_management/tasks/template.yml
+++ b/roles/openshift_management/tasks/template.yml
@@ -15,7 +15,7 @@
# STANDARD PODIFIED DATABASE TEMPLATE
- when: openshift_management_app_template in ['miq-template', 'cfme-template']
block:
- - name: Check if the CFME Server template has been created already
+ - name: Check if the Management Server template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -25,12 +25,12 @@
- when: miq_server_check.results.results == [{}]
block:
- - name: Copy over CFME Server template
+ - name: Copy over Management Server template
copy:
src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME Server Template is created
+ - name: Ensure Management Server Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}"
@@ -41,9 +41,9 @@
######################################################################
# EXTERNAL DATABASE TEMPLATE
-- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template']
+- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
block:
- - name: Check if the CFME Ext-DB Server template has been created already
+ - name: Check if the Management Ext-DB Server template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -53,12 +53,12 @@
- when: miq_ext_db_server_check.results.results == [{}]
block:
- - name: Copy over CFME Ext-DB Server template
+ - name: Copy over Management Ext-DB Server template
copy:
src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME Ext-DB Server Template is created
+ - name: Ensure Management Ext-DB Server Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}-ext-db"
@@ -74,7 +74,7 @@
# Begin conditional PV template creations
# Required for the application server
-- name: Check if the CFME App PV template has been created already
+- name: Check if the Management App PV template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -84,12 +84,12 @@
- when: miq_app_pv_check.results.results == [{}]
block:
- - name: Copy over CFME App PV template
+ - name: Copy over Management App PV template
copy:
src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME App PV Template is created
+ - name: Ensure Management App PV Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}-app-pv"
@@ -103,7 +103,7 @@
# Required for database if the installation is fully podified
- when: openshift_management_app_template in ['miq-template', 'cfme-template']
block:
- - name: Check if the CFME DB PV template has been created already
+ - name: Check if the Management DB PV template has been created already
oc_obj:
namespace: "{{ openshift_management_project }}"
state: list
@@ -113,12 +113,12 @@
- when: miq_db_pv_check.results.results == [{}]
block:
- - name: Copy over CFME DB PV template
+ - name: Copy over Management DB PV template
copy:
src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"
dest: "{{ template_dir }}/"
- - name: Ensure CFME DB PV Template is created
+ - name: Ensure Management DB PV Template is created
oc_obj:
namespace: "{{ openshift_management_project }}"
name: "{{ openshift_management_flavor }}-db-pv"
diff --git a/roles/openshift_master/tasks/journald.yml b/roles/openshift_master/tasks/journald.yml
index f79955e95..e2edd5ef4 100644
--- a/roles/openshift_master/tasks/journald.yml
+++ b/roles/openshift_master/tasks/journald.yml
@@ -3,6 +3,11 @@
stat: path=/etc/systemd/journald.conf
register: journald_conf_file
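+# systemd-journald (with the default Storage=auto) only persists logs across
+# reboots when /var/log/journal exists, so create it before updating the config.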
+- name: Create journald persistence directories
+ file:
+ path: /var/log/journal
+ state: directory
+
- name: Update journald setup
replace:
dest: /etc/systemd/journald.conf
diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml
index 39323904f..b0b888d56 100644
--- a/roles/openshift_nfs/tasks/create_export.yml
+++ b/roles/openshift_nfs/tasks/create_export.yml
@@ -12,7 +12,7 @@
# l_nfs_export_name: Name of sub-directory of the export
# l_nfs_options: Mount Options
-- name: Ensure CFME App NFS export directory exists
+- name: "Ensure {{ l_nfs_export_name }} NFS export directory exists"
file:
path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}"
state: directory
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 74c1a51a8..5dccc9faf 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -23,6 +23,8 @@
# TODO: once this is well-documented, add deprecation notice if using old name.
deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"
+ openshift_deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"
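+ # deployment_subtype falls back to 'basic' when neither openshift_deployment_subtype
+ # nor deployment_subtype is set in the inventory; a different subtype (for example
+ # 'registry' for a standalone registry install) can be supplied the same way.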
- name: Abort when deployment type is invalid
# this variable is required; complain early and clearly if it is invalid.