-rw-r--r--  .papr.inventory (renamed from .redhat-ci.inventory) | 0
-rwxr-xr-x  .papr.sh (renamed from .redhat-ci.sh) | 6
-rw-r--r--  .papr.yml | 42
-rw-r--r--  .redhat-ci.yml | 30
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  ansible.cfg | 1
-rw-r--r--  docs/pull_requests.md | 8
-rw-r--r--  docs/repo_structure.md | 13
-rw-r--r--  filter_plugins/oo_filters.py | 17
-rw-r--r--  images/installer/system-container/README.md | 18
-rw-r--r--  images/installer/system-container/root/exports/config.json.template | 19
-rw-r--r--  images/installer/system-container/root/exports/manifest.json | 3
-rw-r--r--  inventory/byo/hosts.byo.native-glusterfs.example | 2
-rw-r--r--  inventory/byo/hosts.origin.example | 17
-rw-r--r--  inventory/byo/hosts.ose.example | 17
-rw-r--r--  openshift-ansible.spec | 111
-rw-r--r--  playbooks/byo/openshift-cfme/config.yml | 8
-rw-r--r--  playbooks/byo/openshift-cfme/uninstall.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 5
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml | 10
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml | 2
-rw-r--r--  playbooks/common/openshift-cfme/config.yml | 44
l---------  playbooks/common/openshift-cfme/filter_plugins | 1
l---------  playbooks/common/openshift-cfme/library | 1
l---------  playbooks/common/openshift-cfme/roles | 1
-rw-r--r--  playbooks/common/openshift-cfme/uninstall.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml | 158
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/ca.yml) | 136
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 2
-rw-r--r--  roles/calico/defaults/main.yaml | 8
-rw-r--r--  roles/calico_master/defaults/main.yaml | 1
-rw-r--r--  roles/calico_master/templates/calico-policy-controller.yml.j2 | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 3
-rw-r--r--  roles/etcd/tasks/system_container.yml | 52
-rw-r--r--  roles/etcd_common/defaults/main.yml | 16
-rw-r--r--  roles/etcd_common/tasks/backup.yml (renamed from roles/etcd_upgrade/tasks/backup.yml) | 33
-rw-r--r--  roles/etcd_common/tasks/drop_etcdctl.yml (renamed from roles/etcd_common/tasks/etcdctl.yml) | 0
-rw-r--r--  roles/etcd_common/tasks/main.yml | 9
-rw-r--r--  roles/etcd_server_certificates/tasks/main.yml | 37
-rw-r--r--  roles/etcd_upgrade/defaults/main.yml | 6
-rw-r--r--  roles/etcd_upgrade/meta/main.yml | 1
-rw-r--r--  roles/etcd_upgrade/tasks/main.yml | 4
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 58
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 58
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 44
-rw-r--r--  roles/lib_openshift/src/class/oc_obj.py | 10
-rw-r--r--  roles/lib_openshift/src/class/oc_secret.py | 14
-rw-r--r--  roles/lib_openshift/src/doc/obj | 4
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 44
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_secret.py | 1
-rw-r--r--  roles/openshift_cfme/README.md | 404
-rw-r--r--  roles/openshift_cfme/defaults/main.yml | 38
-rw-r--r--  roles/openshift_cfme/files/miq-template.yaml | 566
-rw-r--r--  roles/openshift_cfme/files/openshift_cfme.exports | 3
-rw-r--r--  roles/openshift_cfme/handlers/main.yml | 42
-rw-r--r--  roles/openshift_cfme/img/CFMEBasicDeployment.png | bin 0 -> 38316 bytes
-rw-r--r--  roles/openshift_cfme/meta/main.yml | 20
-rw-r--r--  roles/openshift_cfme/tasks/create_pvs.yml | 36
-rw-r--r--  roles/openshift_cfme/tasks/main.yml | 148
-rw-r--r--  roles/openshift_cfme/tasks/tune_masters.yml | 12
-rw-r--r--  roles/openshift_cfme/tasks/uninstall.yml | 43
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-db.yaml.j2 | 13
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-region.yaml.j2 | 13
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-server.yaml.j2 | 13
-rw-r--r--  roles/openshift_default_storage_class/README.md | 39
-rw-r--r--  roles/openshift_default_storage_class/defaults/main.yml | 14
-rw-r--r--  roles/openshift_default_storage_class/meta/main.yml | 15
-rw-r--r--  roles/openshift_default_storage_class/tasks/main.yml | 19
-rw-r--r--  roles/openshift_default_storage_class/vars/main.yml | 1
-rw-r--r--  roles/openshift_etcd_facts/vars/main.yml | 3
-rw-r--r--  roles/openshift_excluder/tasks/install.yml | 36
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 23
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 172
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_storage.py | 185
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 45
-rw-r--r--  roles/openshift_health_checker/openshift_checks/ovs_version.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 3
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_update.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 2
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 123
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_test.py | 224
-rw-r--r--  roles/openshift_logging/README.md | 5
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 6
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 8
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 4
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 8
-rw-r--r--  roles/openshift_metrics/README.md | 3
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml | 9
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/install_hosa.yaml | 44
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 37
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 3
-rw-r--r--  roles/openshift_metrics/tasks/oc_apply.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_hosa.yaml | 15
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2 | 54
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 | 91
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2 | 25
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2 | 7
-rw-r--r--  roles/openshift_metrics/templates/pvc.j2 | 7
-rw-r--r--  roles/openshift_node/handlers/main.yml | 11
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 17
-rw-r--r--  roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 | 2
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 | 6
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 52
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 83
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 16
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml | 50
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml | 31
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml | 41
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 108
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 36
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 29
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml | 21
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 68
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml | 5
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 | 3
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2 (renamed from roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml) | 2
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 | 10
156 files changed, 4050 insertions, 1390 deletions
diff --git a/.redhat-ci.inventory b/.papr.inventory
index 23bc9923c..23bc9923c 100644
--- a/.redhat-ci.inventory
+++ b/.papr.inventory
diff --git a/.redhat-ci.sh b/.papr.sh
index fce8c1d52..fe0b97b68 100755
--- a/.redhat-ci.sh
+++ b/.papr.sh
@@ -1,10 +1,12 @@
#!/bin/bash
set -xeuo pipefail
+echo "Targeting OpenShift Origin $OPENSHIFT_IMAGE_TAG"
+
pip install -r requirements.txt
# ping the nodes to check they're responding and register their ostree versions
-ansible -vvv -i .redhat-ci.inventory nodes -a 'rpm-ostree status'
+ansible -vvv -i .papr.inventory nodes -a 'rpm-ostree status'
upload_journals() {
mkdir journals
@@ -16,7 +18,7 @@ upload_journals() {
trap upload_journals ERR
# run the actual installer
-ansible-playbook -vvv -i .redhat-ci.inventory playbooks/byo/config.yml
+ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml
# run a small subset of origin conformance tests to sanity
# check the cluster NB: we run it on the master since we may
diff --git a/.papr.yml b/.papr.yml
new file mode 100644
index 000000000..16d6e78b1
--- /dev/null
+++ b/.papr.yml
@@ -0,0 +1,42 @@
+---
+
+# This YAML file is used by PAPR. It details the test
+# environment to provision and the test procedure. For more
+# information on PAPR, see:
+#
+# https://github.com/projectatomic/papr
+#
+# The PAPR YAML specification detailing allowed fields can
+# be found at:
+#
+# https://github.com/projectatomic/papr/blob/master/sample.papr.yml
+
+cluster:
+ hosts:
+ - name: ocp-master
+ distro: fedora/25/atomic
+ - name: ocp-node1
+ distro: fedora/25/atomic
+ - name: ocp-node2
+ distro: fedora/25/atomic
+ container:
+ image: fedora:25
+
+packages:
+ - gcc
+ - python-pip
+ - python-devel
+ - libffi-devel
+ - openssl-devel
+ - redhat-rpm-config
+
+context: 'fedora/25/atomic'
+
+env:
+ OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.1
+
+tests:
+ - ./.papr.sh
+
+artifacts:
+ - journals/
diff --git a/.redhat-ci.yml b/.redhat-ci.yml
deleted file mode 100644
index 6dac7b256..000000000
--- a/.redhat-ci.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-
-cluster:
- hosts:
- - name: ocp-master
- distro: fedora/25/atomic
- - name: ocp-node1
- distro: fedora/25/atomic
- - name: ocp-node2
- distro: fedora/25/atomic
- container:
- image: fedora:25
-
-packages:
- - gcc
- - python-pip
- - python-devel
- - openssl-devel
- - redhat-rpm-config
-
-context: 'fedora/25/atomic | origin/v3.6.0-alpha.1'
-
-env:
- OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.1
-
-tests:
- - ./.redhat-ci.sh
-
-artifacts:
- - journals/
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 85a9820e8..deae7b94c 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.98-1 ./
+3.6.112-1 ./
diff --git a/ansible.cfg b/ansible.cfg
index 034733684..0c74d63da 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -14,6 +14,7 @@ callback_plugins = callback_plugins/
forks = 20
host_key_checking = False
retry_files_enabled = False
+retry_files_save_path = ~/ansible-installer-retries
nocows = True
# Uncomment to use the provided BYO inventory
diff --git a/docs/pull_requests.md b/docs/pull_requests.md
index fcc3e275c..45ae01a9d 100644
--- a/docs/pull_requests.md
+++ b/docs/pull_requests.md
@@ -10,8 +10,8 @@ Whenever a
[Pull Request is opened](../CONTRIBUTING.md#submitting-contributions), some
automated test jobs must be successfully run before the PR can be merged.
-Some of these jobs are automatically triggered, e.g., Travis and Coveralls.
-Other jobs need to be manually triggered by a member of the
+Some of these jobs are automatically triggered, e.g., Travis, PAPR, and
+Coveralls. Other jobs need to be manually triggered by a member of the
[Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors).
## Triggering tests
@@ -48,9 +48,9 @@ simplifying the workflow towards a single infrastructure in the future.
There are a set of tests that run on Fedora infrastructure. They are started
automatically with every pull request.
-They are implemented using the [`redhat-ci` framework](https://github.com/jlebon/redhat-ci).
+They are implemented using the [`PAPR` framework](https://github.com/projectatomic/papr).
-To re-run tests, write a comment containing `bot, retest this please`.
+To re-run tests, write a comment containing only `bot, retest this please`.
## Triggering merge
diff --git a/docs/repo_structure.md b/docs/repo_structure.md
index 693837fba..f598f22c3 100644
--- a/docs/repo_structure.md
+++ b/docs/repo_structure.md
@@ -52,3 +52,16 @@ These are plugins used in playbooks and roles:
.
└── test Contains tests.
```
+
+### CI
+
+These files are used by [PAPR](https://github.com/projectatomic/papr).
+It is very similar in workflow to Travis, with the test
+environment and test scripts defined in a YAML file.
+
+```
+.
+├── .papr.yml
+├── .papr.sh
+└── .papr.inventory
+```
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 8b279981d..cff9f8a60 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -715,7 +715,7 @@ def oo_openshift_env(hostvars):
return facts
-# pylint: disable=too-many-branches, too-many-nested-blocks
+# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
""" Generate list of persistent volumes based on oo_openshift_env
storage options set in host variables.
@@ -747,10 +747,15 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
volume = params['volume']['name']
path = directory + '/' + volume
size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
access_modes = params['access']['modes']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
+ labels=labels,
access_modes=access_modes,
storage=dict(
nfs=dict(
@@ -760,12 +765,17 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
elif kind == 'openstack':
volume = params['volume']['name']
size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
access_modes = params['access']['modes']
filesystem = params['openstack']['filesystem']
volume_id = params['openstack']['volumeID']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
+ labels=labels,
access_modes=access_modes,
storage=dict(
cinder=dict(
@@ -775,6 +785,10 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
elif kind == 'glusterfs':
volume = params['volume']['name']
size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
access_modes = params['access']['modes']
endpoints = params['glusterfs']['endpoints']
path = params['glusterfs']['path']
@@ -782,6 +796,7 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
+ labels=labels,
access_modes=access_modes,
storage=dict(
glusterfs=dict(
diff --git a/images/installer/system-container/README.md b/images/installer/system-container/README.md
index dc95307e5..fbcd47c4a 100644
--- a/images/installer/system-container/README.md
+++ b/images/installer/system-container/README.md
@@ -11,3 +11,21 @@ These files are needed to run the installer using an [Atomic System container](h
* service.template - Template file for the systemd service.
* tmpfiles.template - Template file for systemd-tmpfiles.
+
+## Options
+
+These options may be set via the ``atomic`` ``--set`` flag. For defaults, see ``root/exports/manifest.json``.
+
+* OPTS - Additional options to pass to ansible when running the installer
+
+* VAR_LIB_OPENSHIFT_INSTALLER - Full path of the installer code to mount into the container
+
+* VAR_LOG_OPENSHIFT_LOG - Full path of the log file to mount into the container
+
+* PLAYBOOK_FILE - Full path of the playbook inside the container
+
+* HOME_ROOT - Full path on host to mount as the root home directory inside the container (for .ssh/, etc..)
+
+* ANSIBLE_CONFIG - Full path for the ansible configuration file to use inside the container
+
+* INVENTORY_FILE - Full path for the inventory to use from the host
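Note: the options above are consumed when the system container is installed. As a rough, illustrative sketch only (the image name and the inventory path are placeholders, not taken from this diff), they would be passed to the atomic CLI like this:

```bash
# Illustrative invocation; image name and inventory path are placeholders.
atomic install --system \
  --set INVENTORY_FILE=/path/to/inventory \
  --set PLAYBOOK_FILE=/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml \
  --set HOME_ROOT=/root \
  <installer-system-container-image>
```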
diff --git a/images/installer/system-container/root/exports/config.json.template b/images/installer/system-container/root/exports/config.json.template
index 383e3696e..739c0080f 100644
--- a/images/installer/system-container/root/exports/config.json.template
+++ b/images/installer/system-container/root/exports/config.json.template
@@ -21,7 +21,8 @@
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
"OPTS=$OPTS",
- "PLAYBOOK_FILE=$PLAYBOOK_FILE"
+ "PLAYBOOK_FILE=$PLAYBOOK_FILE",
+ "ANSIBLE_CONFIG=$ANSIBLE_CONFIG"
],
"cwd": "/opt/app-root/src/",
"rlimits": [
@@ -102,7 +103,7 @@
},
{
"type": "bind",
- "source": "$SSH_ROOT",
+ "source": "$HOME_ROOT/.ssh",
"destination": "/opt/app-root/src/.ssh",
"options": [
"bind",
@@ -112,8 +113,8 @@
},
{
"type": "bind",
- "source": "$SSH_ROOT",
- "destination": "/root/.ssh",
+ "source": "$HOME_ROOT",
+ "destination": "/root",
"options": [
"bind",
"rw",
@@ -171,6 +172,16 @@
]
},
{
+ "destination": "/etc/resolv.conf",
+ "type": "bind",
+ "source": "/etc/resolv.conf",
+ "options": [
+ "ro",
+ "rbind",
+ "rprivate"
+ ]
+ },
+ {
"destination": "/sys/fs/cgroup",
"type": "cgroup",
"source": "cgroup",
diff --git a/images/installer/system-container/root/exports/manifest.json b/images/installer/system-container/root/exports/manifest.json
index 1db845965..321a84ee8 100644
--- a/images/installer/system-container/root/exports/manifest.json
+++ b/images/installer/system-container/root/exports/manifest.json
@@ -5,7 +5,8 @@
"VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer",
"VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
"PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
- "SSH_ROOT": "/root/.ssh",
+ "HOME_ROOT": "/root",
+ "ANSIBLE_CONFIG": "/usr/share/ansible/openshift-ansible/ansible.cfg",
"INVENTORY_FILE": "/dev/null"
}
}
diff --git a/inventory/byo/hosts.byo.native-glusterfs.example b/inventory/byo/hosts.byo.native-glusterfs.example
index 2dbb57d40..dc847a5b2 100644
--- a/inventory/byo/hosts.byo.native-glusterfs.example
+++ b/inventory/byo/hosts.byo.native-glusterfs.example
@@ -24,7 +24,7 @@ glusterfs
[OSEv3:vars]
ansible_ssh_user=root
-deployment_type=origin
+openshift_deployment_type=origin
# Specify that we want to use GlusterFS storage for a hosted registry
openshift_hosted_registry_storage_kind=glusterfs
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index b2490638b..962a01a91 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -191,6 +191,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# or
#openshift_master_request_header_ca_file=<path to local ca file to use>
+# CloudForms Management Engine (ManageIQ) App Install
+#
+# Enables installation of MIQ server. Recommended for dedicated
+# clusters only. See roles/openshift_cfme/README.md for instructions
+# and requirements.
+#openshift_cfme_install_app=False
+
# Cloud Provider Configuration
#
# Note: You may make use of environment variables rather than store
@@ -501,6 +508,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -512,6 +520,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -545,6 +554,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -556,6 +566,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -782,6 +793,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Enable API service auditing, available as of 1.3
#openshift_master_audit_config={"enabled": true}
+#
+# For a more advanced audit log configuration, use the line
+# below instead.
+# The directory in "auditFilePath" will be created if it does
+# not exist.
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}}
# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
# by deployment_type=origin
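Note: the advanced `openshift_master_audit_config` example above corresponds to an `auditConfig` stanza in master-config.yaml. A rough sketch of the rendered result (field names taken from the inventory example; the exact rendering is not verified here):

```yaml
# Approximate auditConfig produced in master-config.yaml (illustrative).
auditConfig:
  enabled: true
  auditFilePath: /var/log/openpaas-oscp-audit/openpaas-oscp-audit.log
  maximumFileRetentionDays: 14
  maximumFileSizeMegabytes: 500
  maximumRetainedFiles: 5
```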
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 67d53b22d..63f1f00d2 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -190,6 +190,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# or
#openshift_master_request_header_ca_file=<path to local ca file to use>
+# CloudForms Management Engine (ManageIQ) App Install
+#
+# Enables installation of MIQ server. Recommended for dedicated
+# clusters only. See roles/openshift_cfme/README.md for instructions
+# and requirements.
+#openshift_cfme_install_app=False
+
# Cloud Provider Configuration
#
# Note: You may make use of environment variables rather than store
@@ -501,6 +508,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -512,6 +520,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -545,6 +554,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -556,6 +566,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -782,6 +793,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Enable API service auditing, available as of 3.2
#openshift_master_audit_config={"enabled": true}
+#
+# For a more advanced audit log configuration, use the line
+# below instead.
+# The directory in "auditFilePath" will be created if it does
+# not exist.
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}}
# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 5deb575b5..75ef0e881 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.98
+Version: 3.6.112
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -280,6 +280,115 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Jun 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.112-1
+- Add the the other featured audit-config paramters as example (al-
+ git001@none.at)
+
+* Thu Jun 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.111-1
+- doc: Info for system container installer options (smilner@redhat.com)
+- Add ANSIBLE_CONFIG to system container installer (smilner@redhat.com)
+- Add missing file. Remove debugging prompt. (tbielawa@redhat.com)
+- Update readme one last time (tbielawa@redhat.com)
+- Reconfigure masters in serial to avoid HA meltdowns (tbielawa@redhat.com)
+- First POC of a CFME turnkey solution in openshift-anisble
+ (tbielawa@redhat.com)
+- Reverted most of this pr 4356 except: adding
+ openshift_logging_fluentd_buffer_queue_limit: 1024
+ openshift_logging_fluentd_buffer_size_limit: 1m
+ openshift_logging_mux_buffer_queue_limit: 1024
+ openshift_logging_mux_buffer_size_limit: 1m and setting the matched
+ environment variables. (nhosoi@redhat.com)
+- Adding the defaults for openshift_logging_fluentd_{cpu,memory}_limit to
+ roles/openshift_logging_fluentd/defaults/main.yml. (nhosoi@redhat.com)
+- Adding environment variables FLUENTD_CPU_LIMIT, FLUENTD_MEMORY_LIMIT,
+ MUX_CPU_LIMIT, MUX_MEMORY_LIMIT. (nhosoi@redhat.com)
+- Introducing fluentd/mux buffer_queue_limit, buffer_size_limit, cpu_limit, and
+ memory_limit. (nhosoi@redhat.com)
+
+* Thu Jun 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.110-1
+- papr: add documentation to YAML and simplify context (jlebon@redhat.com)
+- docs: better documentation for PAPR (jlebon@redhat.com)
+- papr: install libffi-devel (jlebon@redhat.com)
+- pre-install checks: add more during byo install (lmeyer@redhat.com)
+- move etcd backup to etcd_common role (jchaloup@redhat.com)
+- Support installing HOSA via ansible (mwringe@redhat.com)
+- GlusterFS: Remove requirement for heketi-cli (jarrpa@redhat.com)
+- GlusterFS: Fix bugs in wipe (jarrpa@redhat.com)
+- GlusterFS: Skip heketi-cli install on Atomic (jarrpa@redhat.com)
+- GlusterFS: Create a StorageClass if specified (jarrpa@redhat.com)
+- GlusterFS: Use proper secrets (jarrpa@redhat.com)
+- GlusterFS: Allow cleaner separation of multiple clusters (jarrpa@redhat.com)
+- GlusterFS: Minor corrections and cleanups (jarrpa@redhat.com)
+- GlusterFS: Improve documentation (jarrpa@redhat.com)
+- GlusterFS: Allow configuration of kube namespace for heketi
+ (jarrpa@redhat.com)
+- GlusterFS: Adjust when clauses for registry config (jarrpa@redhat.com)
+- GlusterFS: Allow failure reporting when deleting deploy-heketi
+ (jarrpa@redhat.com)
+- GlusterFS: Tweak pod probe parameters (jarrpa@redhat.com)
+- GlusterFS: Allow for configuration of node selector (jarrpa@redhat.com)
+- GlusterFS: Label on Openshift node name (jarrpa@redhat.com)
+- GlusterFS: Make sure timeout is an int (jarrpa@redhat.com)
+- GlusterFS: Use groups variables (jarrpa@redhat.com)
+- papr: rename redhat-ci related files to papr (jlebon@redhat.com)
+- singletonize some role tasks that repeat a lot (lmeyer@redhat.com)
+
+* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.109-1
+-
+
+* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.108-1
+- Upgraded Calico to 2.2.1 Release (vincent.schwarzer@yahoo.de)
+
+* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.107-1
+- Disable negative caching, set cache TTL to 1s (skuznets@redhat.com)
+- Update mounts in system container installer (smilner@redhat.com)
+- Set ansible retry file location (smilner@redhat.com)
+- installer: add bind mount for /etc/resolv.conf (gscrivan@redhat.com)
+- Making pylint happy (ewolinet@redhat.com)
+- Fix possible access to undefined variable (rhcarvalho@gmail.com)
+- certificates: copy the certificates for the etcd system container
+ (gscrivan@redhat.com)
+- Separate etcd and OpenShift CA redeploy playbooks. (abutcher@redhat.com)
+- lib/base: allow for results parsing on non-zero return code
+ (jarrpa@redhat.com)
+- etcd: system container defines ETCD_(PEER_)?TRUSTED_CA_FILE
+ (gscrivan@redhat.com)
+- etcd: unmask system container service before installing it
+ (gscrivan@redhat.com)
+- etcd: copy previous database when migrating to system container
+ (gscrivan@redhat.com)
+- etcd: define data dir location for the system container (gscrivan@redhat.com)
+- oc_obj: set _delete() rc to 0 if err is 'not found' (jarrpa@redhat.com)
+- oc_obj: only check 'items' if exists in delete (jarrpa@redhat.com)
+- Removed hardocded Calico Policy Controller URL (vincent.schwarzer@yahoo.de)
+- Allowing openshift_metrics to specify PV selectors and allow way to define
+ selectors when creating pv (ewolinet@redhat.com)
+
+* Tue Jun 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.100-1
+- Change default key for gce (hekumar@redhat.com)
+- set etcd working directory for embedded etcd (jchaloup@redhat.com)
+- Add daemon-reload handler to openshift_node and notify when /etc/systemd
+ files have been updated. (abutcher@redhat.com)
+- Use volume.beta.kubernetes.io annotation for storage-classes
+ (per.carlson@vegvesen.no)
+- Correct master-config update during upgrade (rteague@redhat.com)
+
+* Mon Jun 12 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.99-1
+- Replace repoquery with module (jchaloup@redhat.com)
+- Consider previous value of 'changed' when updating (rhcarvalho@gmail.com)
+- Improve code readability (rhcarvalho@gmail.com)
+- Disable excluder only on nodes that are not masters (jchaloup@redhat.com)
+- Added includes to specify openshift version for libvirt cluster create.
+ Otherwise bin/cluster create fails on unknown version for libvirt deployment.
+ (schulthess@puzzle.ch)
+- docker checks: finish and refactor (lmeyer@redhat.com)
+- oc_secret: allow use of force for secret type (jarrpa@redhat.com)
+- add docker storage, docker driver checks (jvallejo@redhat.com)
+- Add dependency and use same storageclass name as upstream
+ (hekumar@redhat.com)
+- Add documentation (hekumar@redhat.com)
+- Install default storageclass in AWS & GCE envs (hekumar@redhat.com)
+
* Fri Jun 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.98-1
-
diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-cfme/config.yml
new file mode 100644
index 000000000..0e8e7a94d
--- /dev/null
+++ b/playbooks/byo/openshift-cfme/config.yml
@@ -0,0 +1,8 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- include: ../../common/openshift-cfme/config.yml
diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml
new file mode 100644
index 000000000..c8ed16859
--- /dev/null
+++ b/playbooks/byo/openshift-cfme/uninstall.yml
@@ -0,0 +1,6 @@
+---
+# - include: ../openshift-cluster/initialize_groups.yml
+# tags:
+# - always
+
+- include: ../../common/openshift-cfme/uninstall.yml
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index fd4a9eb26..2372a5322 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -15,6 +15,11 @@
checks:
- disk_availability
- memory_availability
+ - package_availability
+ - package_update
+ - package_version
+ - docker_image_availability
+ - docker_storage
- include: ../../common/openshift-cluster/std_include.yml
tags:
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
new file mode 100644
index 000000000..29f821eda
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
@@ -0,0 +1,10 @@
+---
+- include: initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
index 3b33e0d6f..6e11a111b 100644
--- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
@@ -7,4 +7,4 @@
tags:
- always
-- include: ../../common/openshift-cluster/redeploy-certificates/ca.yml
+- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml
diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml
new file mode 100644
index 000000000..533a35d9e
--- /dev/null
+++ b/playbooks/common/openshift-cfme/config.yml
@@ -0,0 +1,44 @@
+---
+# TODO: Make this work. The 'name' variable below is undefined
+# presently because it's part of the cfme role. This play can't run
+# until that's re-worked.
+#
+# - name: Pre-Pull manageiq-pods docker images
+# hosts: nodes
+# tasks:
+# - name: Ensure the latest manageiq-pods docker image is pulling
+# docker_image:
+# name: "{{ openshift_cfme_container_image }}"
+# # Fire-and-forget method, never timeout
+# async: 99999999999
+# # F-a-f, never check on this. True 'background' task.
+# poll: 0
+
+- name: Configure Masters for CFME Bulk Image Imports
+ hosts: oo_masters_to_config
+ serial: 1
+ tasks:
+ - name: Run master cfme tuning playbook
+ include_role:
+ name: openshift_cfme
+ tasks_from: tune_masters
+
+- name: Setup CFME
+ hosts: oo_first_master
+ vars:
+ r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}"
+ pre_tasks:
+ - name: Create a temporary place to evaluate the PV templates
+ command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: r_openshift_cfme_mktemp
+ changed_when: false
+ - name: Ensure the server template was read from disk
+ debug:
+ msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}"
+
+ tasks:
+ - name: Run the CFME Setup Role
+ include_role:
+ name: openshift_cfme
+ vars:
+ template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}"
diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-cfme/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-cfme/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-cfme/library
new file mode 120000
index 000000000..ba40d2f56
--- /dev/null
+++ b/playbooks/common/openshift-cfme/library
@@ -0,0 +1 @@
+../../../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-cfme/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-cfme/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-cfme/uninstall.yml
new file mode 100644
index 000000000..78b8e7668
--- /dev/null
+++ b/playbooks/common/openshift-cfme/uninstall.yml
@@ -0,0 +1,8 @@
+---
+- name: Uninstall CFME
+ hosts: masters
+ tasks:
+ - name: Run the CFME Uninstall Role Tasks
+ include_role:
+ name: openshift_cfme
+ tasks_from: uninstall
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 46932b27f..c28ce4c14 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -155,5 +155,5 @@
groups: oo_glusterfs_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}"
+ with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
changed_when: no
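Note: the change above moves `default([])` onto the variable that may be undefined. With the old expression, an undefined `g_glusterfs_registry_hosts` makes the `union()` fail before the trailing `default([])` is ever evaluated; a minimal sketch of the two forms:

```yaml
# Old form: fails if g_glusterfs_registry_hosts is undefined, because
# default([]) applies to the union result, not to the variable itself.
with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}"
# New form: the possibly-undefined variable is defaulted before the union.
with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
```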
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 5db71b857..8d94b6509 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -45,6 +45,8 @@
- role: cockpit-ui
when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+ - role: openshift_default_storage_class
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- name: Update master-config for publicLoggingURL
hosts: oo_masters_to_config:!oo_first_master
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
new file mode 100644
index 000000000..6964e8567
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
@@ -0,0 +1,158 @@
+---
+- name: Check cert expirys
+ hosts: oo_etcd_to_config:oo_masters_to_config
+ vars:
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
+
+- name: Backup existing etcd CA certificate directories
+ hosts: oo_etcd_to_config
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Determine if CA certificate directory exists
+ stat:
+ path: "{{ etcd_ca_dir }}"
+ register: etcd_ca_certs_dir_stat
+ - name: Backup generated etcd certificates
+ command: >
+ tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_ca_dir }}
+ args:
+ warn: no
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
+ - name: Remove CA certificate directory
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: absent
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
+
+- name: Generate new etcd CA
+ hosts: oo_first_etcd
+ roles:
+ - role: openshift_etcd_ca
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_etcd_mktemp
+ changed_when: false
+
+- name: Distribute etcd CA to etcd hosts
+ hosts: oo_etcd_to_config
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Create a tarball of the etcd ca certs
+ command: >
+ tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
+ -C {{ etcd_ca_dir }} .
+ args:
+ creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ warn: no
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+ - name: Retrieve etcd ca cert tarball
+ fetch:
+ src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+ - name: Ensure ca directory exists
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: directory
+ - name: Unarchive etcd ca cert tarballs
+ unarchive:
+ src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ etcd_ca_dir }}"
+ - name: Read current etcd CA
+ slurp:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ register: g_current_etcd_ca_output
+ - name: Read new etcd CA
+ slurp:
+ src: "{{ etcd_ca_dir }}/ca.crt"
+ register: g_new_etcd_ca_output
+ - copy:
+ content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
+ dest: "{{ item }}/ca.crt"
+ with_items:
+ - "{{ etcd_conf_dir }}"
+ - "{{ etcd_ca_dir }}"
+
+- include: ../../openshift-etcd/restart.yml
+ # Do not restart etcd when etcd certificates were previously expired.
+ when: ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
+
+- name: Retrieve etcd CA certificate
+ hosts: oo_first_etcd
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Retrieve etcd CA certificate
+ fetch:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+
+- name: Distribute etcd CA to masters
+ hosts: oo_masters_to_config
+ vars:
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ tasks:
+ - name: Deploy etcd CA
+ copy:
+ src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
+ dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
+ when: groups.oo_etcd_to_config | default([]) | length > 0
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - file:
+ name: "{{ g_etcd_mktemp.stdout }}"
+ state: absent
+ changed_when: false
+
+- include: ../../openshift-master/restart.yml
+ # Do not restart masters when master certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
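Note: the copy task above writes the new CA followed by the current CA into ca.crt, so that during rotation certificates signed by either CA remain trusted until everything has been reissued. The result is a plain concatenated PEM bundle, roughly:

```
-----BEGIN CERTIFICATE-----
... newly generated etcd CA ...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
... previous etcd CA, kept so existing certificates stay valid ...
-----END CERTIFICATE-----
```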
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 0d94a011a..089ae6bbc 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -7,7 +7,7 @@
when: not openshift.common.version_gte_3_2_or_1_2 | bool
- name: Check cert expirys
- hosts: oo_nodes_to_config:oo_etcd_to_config:oo_masters_to_config
+ hosts: oo_nodes_to_config:oo_masters_to_config
vars:
openshift_certificate_expiry_show_all: yes
roles:
@@ -18,140 +18,6 @@
# certificates were previously expired.
- role: openshift_certificate_expiry
-- name: Backup existing etcd CA certificate directories
- hosts: oo_etcd_to_config
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- tasks:
- - name: Determine if CA certificate directory exists
- stat:
- path: "{{ etcd_ca_dir }}"
- register: etcd_ca_certs_dir_stat
- - name: Backup generated etcd certificates
- command: >
- tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_ca_dir }}
- args:
- warn: no
- when: etcd_ca_certs_dir_stat.stat.exists | bool
- - name: Remove CA certificate directory
- file:
- path: "{{ etcd_ca_dir }}"
- state: absent
- when: etcd_ca_certs_dir_stat.stat.exists | bool
-
-- name: Generate new etcd CA
- hosts: oo_first_etcd
- roles:
- - role: openshift_etcd_ca
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
-
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_etcd_mktemp
- changed_when: false
-
-- name: Distribute etcd CA to etcd hosts
- hosts: oo_etcd_to_config
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- tasks:
- - name: Create a tarball of the etcd ca certs
- command: >
- tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
- -C {{ etcd_ca_dir }} .
- args:
- creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- warn: no
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Retrieve etcd ca cert tarball
- fetch:
- src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Ensure ca directory exists
- file:
- path: "{{ etcd_ca_dir }}"
- state: directory
- - name: Unarchive etcd ca cert tarballs
- unarchive:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- - name: Read current etcd CA
- slurp:
- src: "{{ etcd_conf_dir }}/ca.crt"
- register: g_current_etcd_ca_output
- - name: Read new etcd CA
- slurp:
- src: "{{ etcd_ca_dir }}/ca.crt"
- register: g_new_etcd_ca_output
- - copy:
- content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
- dest: "{{ item }}/ca.crt"
- with_items:
- - "{{ etcd_conf_dir }}"
- - "{{ etcd_ca_dir }}"
-
-- name: Retrieve etcd CA certificate
- hosts: oo_first_etcd
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- tasks:
- - name: Retrieve etcd CA certificate
- fetch:
- src: "{{ etcd_conf_dir }}/ca.crt"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
-
-- name: Distribute etcd CA to masters
- hosts: oo_masters_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Deploy CA certificate, key, bundle and serial
- copy:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
- dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
- when: groups.oo_etcd_to_config | default([]) | length > 0
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file:
- name: "{{ g_etcd_mktemp.stdout }}"
- state: absent
- changed_when: false
-
-- include: ../../openshift-etcd/restart.yml
- # Do not restart etcd when etcd certificates were previously expired.
- when: ('expired' not in (hostvars
- | oo_select_keys(groups['etcd'])
- | oo_collect('check_results.check_results.etcd')
- | oo_collect('health')))
-
# Update master config when ca-bundle not referenced. Services will be
# restarted below after new CA certificate has been distributed.
- name: Ensure ca-bundle.crt is referenced in master configuration
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
index 7988e97ab..a66301c0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -1,6 +1,6 @@
---
- name: Disable excluders
- hosts: oo_nodes_to_config
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
gather_facts: no
roles:
- role: openshift_excluder
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index b7fd2c0c5..616ba04f8 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -3,12 +3,12 @@
hosts: oo_etcd_hosts_to_backup
roles:
- role: openshift_facts
- - role: etcd_upgrade
- r_etcd_upgrade_action: backup
- r_etcd_backup_tag: etcd_backup_tag
+ - role: etcd_common
+ r_etcd_common_action: backup
+ r_etcd_common_backup_tag: etcd_backup_tag
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_upgrade_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
hosts: localhost
@@ -18,7 +18,7 @@
- set_fact:
etcd_backup_completed: "{{ hostvars
| oo_select_keys(groups.oo_etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'r_etcd_upgrade_backup_complete': true}) }}"
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- fail:
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index 3e01883ae..64abc54e7 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -16,7 +16,8 @@
tasks:
- include_role:
name: etcd_common
- tasks_from: etcdctl.yml
+ vars:
+ r_etcd_common_action: drop_etcdctl
- name: Perform etcd upgrade
include: ./upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 1b437dce9..9b4a8e413 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -14,24 +14,26 @@
- when: not openshift.common.is_containerized | bool
block:
- name: Check latest available OpenShift RPM version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
- failed_when: false
- changed_when: false
- register: avail_openshift_version
+ repoquery:
+ name: "{{ openshift.common.service_type }}"
+ ignore_excluders: true
+ register: repoquery_out
- name: Fail when unable to determine available OpenShift RPM version
fail:
msg: "Unable to determine available OpenShift RPM version"
when:
- - avail_openshift_version.stdout == ''
+ - not repoquery_out.results.package_found
+
+ - name: Set fact avail_openshift_version
+ set_fact:
+ avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}"
- name: Verify OpenShift RPMs are available for upgrade
fail:
- msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
+ msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - not avail_openshift_version | skipped
- - avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+ - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minium requirement for Origin upgrade
fail:
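Note: with the switch from the raw `repoquery` command to the `repoquery` module, the available version is read from structured results instead of stdout. A sketch of the result shape implied by the tasks above (inferred from their usage, not from module documentation; the version string is illustrative):

```yaml
repoquery_out:
  results:
    package_found: true
    versions:
      available_versions:
        - "3.6.112"   # first entry becomes avail_openshift_version
```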
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 21e1d440d..74c2964aa 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -115,6 +115,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_5/master_config_upgrade.yml"
- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index e34259b00..a66fb51ff 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -115,6 +115,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
- include: ../post_control_plane.yml
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index c7eea46f2..207dee068 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -3,13 +3,13 @@ kubeconfig: "{{openshift.common.config_base}}/node/{{ 'system:node:' + openshif
cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
-cni_url: "https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tgz"
+cni_url: "https://github.com/containernetworking/cni/releases/download/v0.5.2/cni-amd64-v0.5.2.tgz"
-calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico"
-calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico-ipam"
+calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico"
+calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico-ipam"
calico_ipv4pool_ipip: "always"
calico_ipv4pool_cidr: "192.168.0.0/16"
calico_log_dir: "/var/log/calico"
-calico_node_image: "calico/node:v1.1.0"
+calico_node_image: "calico/node:v1.2.1"
diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml
index 5b324bce5..b2df0105f 100644
--- a/roles/calico_master/defaults/main.yaml
+++ b/roles/calico_master/defaults/main.yaml
@@ -4,3 +4,4 @@ kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconf
calicoctl_bin_dir: "/usr/local/bin/"
calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl"
+calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.5.4"
diff --git a/roles/calico_master/templates/calico-policy-controller.yml.j2 b/roles/calico_master/templates/calico-policy-controller.yml.j2
index 1b87758ce..811884473 100644
--- a/roles/calico_master/templates/calico-policy-controller.yml.j2
+++ b/roles/calico_master/templates/calico-policy-controller.yml.j2
@@ -74,7 +74,7 @@ spec:
serviceAccountName: calico
containers:
- name: calico-policy-controller
- image: quay.io/calico/kube-policy-controller:v0.5.4
+ image: {{ calico_url_policy_controller }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index fa2f44609..586aebb11 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -122,7 +122,8 @@
- include_role:
name: etcd_common
- tasks_from: etcdctl.yml
+ vars:
+ r_etcd_common_action: drop_etcdctl
when: openshift_etcd_etcdctl_profile | default(true) | bool
- name: Set fact etcd_service_status_changed
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 72ffadbd2..f1d948d16 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -15,6 +15,56 @@
{%- endif -%}
{% endfor -%}
+- name: Check etcd system container package
+ command: >
+ atomic containers list --no-trunc -a -f container=etcd -f backend=ostree
+ register: etcd_result
+
+- name: Unmask etcd service
+ systemd:
+ name: etcd
+ state: stopped
+ enabled: yes
+ masked: no
+ daemon_reload: yes
+ register: task_result
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+ when: "'etcd' in etcd_result.stdout"
+
+- name: Disable etcd_container
+ systemd:
+ name: etcd_container
+ state: stopped
+ enabled: no
+ masked: yes
+ daemon_reload: yes
+ register: task_result
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+
+- name: Check for previous etcd data store
+ stat:
+ path: "{{ etcd_data_dir }}/member/"
+ register: src_datastore
+
+- name: Check for etcd system container data store
+ stat:
+ path: "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member"
+ register: dest_datastore
+
+- name: Ensure that etcd system container data dirs exist
+ file: path="{{ item }}" state=directory
+ with_items:
+ - "{{ r_etcd_common_system_container_host_dir }}/etc"
+ - "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd"
+
+- name: Copy etcd data store
+ command: >
+ cp -a {{ etcd_data_dir }}/member
+ {{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member
+ when:
+ - src_datastore.stat.exists
+ - not dest_datastore.stat.exists
+
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
@@ -35,3 +85,5 @@
- ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
- ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
+ - ETCD_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
+ - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index e1a080b34..8cc7a9c20 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -1,9 +1,21 @@
---
+# Default action when calling this role
+r_etcd_common_action: noop
+r_etcd_common_backup_tag: ''
+r_etcd_common_backup_sufix_name: ''
+
# runc, docker, host
r_etcd_common_etcd_runtime: "docker"
+r_etcd_common_embedded_etcd: false
+
+# etcd run on a host => use etcdctl command directly
+# etcd run as a docker container => use docker exec
+# etcd run as a runc container => use runc exec
+r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
# etcd server vars
-etcd_conf_dir: "{{ '/etc/etcd' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/etc' }}"
+etcd_conf_dir: '/etc/etcd'
+r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
etcd_system_container_conf_dir: /var/lib/etcd/etc
etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
@@ -40,7 +52,7 @@ etcd_is_containerized: False
etcd_is_thirdparty: False
# etcd dir vars
-etcd_data_dir: /var/lib/etcd/
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
# etcd ports and protocols
etcd_client_port: 2379
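
For quick reference, the new computed defaults resolve as follows (derived directly from the Jinja expressions above, nothing external assumed):

    r_etcd_common_etcd_runtime: host    ->  etcdctl
    r_etcd_common_etcd_runtime: docker  ->  docker exec etcd_container etcdctl
    r_etcd_common_etcd_runtime: runc    ->  runc exec etcd etcdctl
    r_etcd_common_embedded_etcd: true   ->  etcdctl, with etcd_data_dir=/var/lib/origin/openshift.local.etcd
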
diff --git a/roles/etcd_upgrade/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
index 1ea6fc59f..4a4832275 100644
--- a/roles/etcd_upgrade/tasks/backup.yml
+++ b/roles/etcd_common/tasks/backup.yml
@@ -1,15 +1,11 @@
---
-# INPUT r_etcd_backup_sufix_name
-# INPUT r_etcd_backup_tag
-# OUTPUT r_etcd_upgrade_backup_complete
- set_fact:
- # ORIGIN etcd_data_dir etcd_common.defaults
- l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_backup_tag | default('') }}{{ r_etcd_backup_sufix_name }}"
+ l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1
- register: avail_disk
+ register: l_avail_disk
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -17,8 +13,8 @@
# TODO: replace shell module with command and update later checks
- name: Check current etcd disk usage
shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: r_etcd_upgrade_embedded_etcd | bool
+ register: l_etcd_disk_usage
+ when: r_etcd_common_embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -26,9 +22,9 @@
- name: Abort if insufficient disk space for etcd backup
fail:
msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (r_etcd_upgrade_embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+ {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ l_avail_disk.stdout }} Kb available.
+ when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int)
# For non containerized and non embedded we should have the correct version of
# etcd installed already. So don't do anything.
@@ -37,17 +33,22 @@
#
# For embedded non containerized we need to ensure we have the latest version
# etcd on the host.
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: l_ostree_booted
+
- name: Install latest etcd for embedded
package:
name: etcd
state: latest
when:
- - r_etcd_upgrade_embedded_etcd | bool
+ - r_etcd_common_embedded_etcd | bool
- not l_ostree_booted.stat.exists | bool
- name: Generate etcd backup
command: >
- {{ etcdctl_command }} backup --data-dir={{ etcd_data_dir }}
+ {{ r_etcd_common_etcdctl_command }} backup --data-dir={{ etcd_data_dir }}
--backup-dir={{ l_etcd_backup_dir }}
# According to the docs change you can simply copy snap/db
@@ -55,16 +56,16 @@
- name: Check for v3 data store
stat:
path: "{{ etcd_data_dir }}/member/snap/db"
- register: v3_db
+ register: l_v3_db
- name: Copy etcd v3 data store
command: >
cp -a {{ etcd_data_dir }}/member/snap/db
{{ l_etcd_backup_dir }}/member/snap/
- when: v3_db.stat.exists
+ when: l_v3_db.stat.exists
- set_fact:
- r_etcd_upgrade_backup_complete: True
+ r_etcd_common_backup_complete: True
- name: Display location of etcd backup
debug:
diff --git a/roles/etcd_common/tasks/etcdctl.yml b/roles/etcd_common/tasks/drop_etcdctl.yml
index 6cb456677..6cb456677 100644
--- a/roles/etcd_common/tasks/etcdctl.yml
+++ b/roles/etcd_common/tasks/drop_etcdctl.yml
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
new file mode 100644
index 000000000..6ed87e6c7
--- /dev/null
+++ b/roles/etcd_common/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: Fail if invalid r_etcd_common_action provided
+ fail:
+ msg: "etcd_common role can only be called with 'noop' or 'backup' or 'drop_etcdctl'"
+ when: r_etcd_common_action not in ['noop', 'backup', 'drop_etcdctl']
+
+- name: Include main action task file
+ include: "{{ r_etcd_common_action }}.yml"
+ when: r_etcd_common_action != "noop"
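
Callers now select behaviour purely through r_etcd_common_action, exactly as the roles/etcd change earlier in this diff does for drop_etcdctl. A minimal sketch of a backup invocation, assuming the defaults defined above (the tag and suffix values are illustrative):

    - name: Back up etcd before an upgrade
      include_role:
        name: etcd_common
      vars:
        r_etcd_common_action: backup
        r_etcd_common_backup_tag: pre-upgrade-
        r_etcd_common_backup_sufix_name: "{{ ansible_date_time.date }}"

On success the included backup.yml sets r_etcd_common_backup_complete, which the calling play can assert on.
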
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml
index 3ac7f3401..4795188a6 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd_server_certificates/tasks/main.yml
@@ -5,11 +5,14 @@
- name: Check status of etcd certificates
stat:
- path: "{{ etcd_cert_config_dir }}/{{ item }}"
+ path: "{{ item }}"
with_items:
- - "{{ etcd_cert_prefix }}server.crt"
- - "{{ etcd_cert_prefix }}peer.crt"
- - "{{ etcd_cert_prefix }}ca.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
register: g_etcd_server_cert_stat_result
when: not etcd_certificates_redeploy | default(false) | bool
@@ -132,8 +135,11 @@
- name: Ensure certificate directory exists
file:
- path: "{{ etcd_cert_config_dir }}"
+ path: "{{ item }}"
state: directory
+ with_items:
+ - "{{ etcd_cert_config_dir }}"
+ - "{{ etcd_system_container_cert_config_dir }}"
when: etcd_server_certs_missing | bool
- name: Unarchive cert tarball
@@ -164,15 +170,28 @@
- name: Ensure ca directory exists
file:
- path: "{{ etcd_ca_dir }}"
+ path: "{{ item }}"
state: directory
+ with_items:
+ - "{{ etcd_ca_dir }}"
+ - "{{ etcd_system_container_cert_config_dir }}/ca"
when: etcd_server_certs_missing | bool
-- name: Unarchive etcd ca cert tarballs
+- name: Unarchive cert tarball for the system container
+ unarchive:
+ src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_system_container_cert_config_dir }}"
+ when:
+ - etcd_server_certs_missing | bool
+ - r_etcd_common_etcd_runtime == 'runc'
+
+- name: Unarchive etcd ca cert tarballs for the system container
unarchive:
src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- when: etcd_server_certs_missing | bool
+ dest: "{{ etcd_system_container_cert_config_dir }}/ca"
+ when:
+ - etcd_server_certs_missing | bool
+ - r_etcd_common_etcd_runtime == 'runc'
- name: Delete temporary directory
local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent
diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml
index 01ad8a268..61bbba225 100644
--- a/roles/etcd_upgrade/defaults/main.yml
+++ b/roles/etcd_upgrade/defaults/main.yml
@@ -1,9 +1,3 @@
---
r_etcd_upgrade_action: upgrade
r_etcd_upgrade_mechanism: rpm
-r_etcd_upgrade_embedded_etcd: False
-
-# etcd run on a host => use etcdctl command directly
-# etcd run as a docker container => use docker exec
-# etcd run as a runc container => use runc exec
-etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_upgrade_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
diff --git a/roles/etcd_upgrade/meta/main.yml b/roles/etcd_upgrade/meta/main.yml
index 018bdc8d7..afdb0267f 100644
--- a/roles/etcd_upgrade/meta/main.yml
+++ b/roles/etcd_upgrade/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
- system
dependencies:
- role: etcd_common
+ r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}"
diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml
index 5178c14e3..129c69d6b 100644
--- a/roles/etcd_upgrade/tasks/main.yml
+++ b/roles/etcd_upgrade/tasks/main.yml
@@ -2,9 +2,9 @@
# INPUT r_etcd_upgrade_action
- name: Fail if invalid etcd_upgrade_action provided
fail:
- msg: "etcd_upgrade role can only be called with 'upgrade' or 'backup'"
+ msg: "etcd_upgrade role can only be called with 'upgrade'"
when:
- - r_etcd_upgrade_action not in ['upgrade', 'backup']
+ - r_etcd_upgrade_action not in ['upgrade']
- name: Detecting Atomic Host Operating System
stat:
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 3974cc4dd..1b73bfd0e 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1097,10 +1097,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1110,34 +1106,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 320eac17e..b09321a5b 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -1083,10 +1083,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1096,34 +1092,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index f9658d6e1..221ef5094 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1069,10 +1069,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1082,34 +1078,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 0bdfd0bad..071562875 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1069,10 +1069,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1082,34 +1078,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index df0e40d20..bf2650460 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1187,10 +1187,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1200,34 +1196,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 8af8cb196..a2b7d12c0 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1212,10 +1212,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1225,34 +1221,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 3ed0d65dc..289f08b83 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1061,10 +1061,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1074,34 +1070,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 5c8ed48d2..7cd29215f 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -1067,10 +1067,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1080,34 +1076,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index f3b6d552d..5b11f45ba 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1111,10 +1111,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1124,34 +1120,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index c6421128a..d3834ce0c 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1078,10 +1078,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1091,34 +1087,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index a791c29af..0d751fe28 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -1051,10 +1051,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1064,34 +1060,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index bbc123ce0..3a6ba3e56 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -1070,10 +1070,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1083,34 +1079,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index cd1afd0d2..5db036b23 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1087,10 +1087,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1100,34 +1096,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 215723cc8..9b0c0e0e4 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -90,9 +90,9 @@ options:
required: false
default: str
aliases: []
- all_namespace:
+ all_namespaces:
description:
- - The namespace where the object lives.
+ - Search in all namespaces for the object.
required: false
default: false
aliases: []
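
With the corrected option name, a cluster-wide lookup would look roughly like this (the state and kind values are illustrative; consult the module documentation for the exact accepted parameters):

    - name: List routes in all namespaces
      oc_obj:
        state: list
        kind: route
        all_namespaces: True
      register: route_list
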
@@ -1090,10 +1090,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1103,34 +1099,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
- else:
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
@@ -1473,7 +1461,12 @@ class OCObject(OpenShiftCLI):
def delete(self):
'''delete the object'''
- return self._delete(self.kind, name=self.name, selector=self.selector)
+ results = self._delete(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
+ results['returncode'] = 0
+
+ return results
def create(self, files=None, content=None):
'''
@@ -1557,7 +1550,8 @@ class OCObject(OpenShiftCLI):
if state == 'absent':
# verify its not in our results
if (params['name'] is not None or params['selector'] is not None) and \
- (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
+ (len(api_rval['results']) == 0 or \
+ ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
return {'changed': False, 'state': state}
if check_mode:
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 358ef5130..130521761 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1022,10 +1022,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1035,34 +1031,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 025b846c6..c6568d520 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1079,10 +1079,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1092,34 +1088,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 05dfddab8..a78bc06d2 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -1076,10 +1076,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1089,34 +1085,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index d7de4964c..a88639bfc 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -1071,10 +1071,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1084,34 +1080,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 3090b4cad..0c0bc9386 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1121,10 +1121,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1134,34 +1130,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 6a505fb6b..f112b6dd0 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1065,10 +1065,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1078,34 +1074,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 02257500f..d762e0c38 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1117,10 +1117,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1130,34 +1126,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
@@ -1613,7 +1601,7 @@ class OCSecret(OpenShiftCLI):
'''delete a secret by name'''
return self._delete('secrets', self.name)
- def create(self, files=None, contents=None):
+ def create(self, files=None, contents=None, force=False):
'''Create a secret '''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
@@ -1622,6 +1610,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['secrets', 'new', self.name]
if self.type is not None:
cmd.append("--type=%s" % (self.type))
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -1634,7 +1624,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files)
+ secret = self.prep_secret(files, force)
if secret['returncode'] != 0:
return secret
@@ -1646,7 +1636,7 @@ class OCSecret(OpenShiftCLI):
return self._replace(sfile_path, force=force)
- def prep_secret(self, files=None, contents=None):
+ def prep_secret(self, files=None, contents=None, force=False):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
@@ -1657,6 +1647,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['-ojson', 'secrets', 'new', self.name]
if self.type is not None:
cmd.extend(["--type=%s" % (self.type)])
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@@ -1719,7 +1711,7 @@ class OCSecret(OpenShiftCLI):
return {'changed': True,
'msg': 'Would have performed a create.'}
- api_rval = ocsecret.create(files, params['contents'])
+ api_rval = ocsecret.create(files, params['contents'], force=params['force'])
# Remove files
if files and params['delete_after']:
@@ -1736,7 +1728,7 @@ class OCSecret(OpenShiftCLI):
########
# Update
########
- secret = ocsecret.prep_secret(params['files'], params['contents'])
+ secret = ocsecret.prep_secret(params['files'], params['contents'], force=params['force'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
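
The new force parameter simply forwards --confirm to oc secrets new in both create and prep_secret. A hedged usage sketch — the files entry format, namespace, and paths here are assumptions for illustration only:

    - name: Create or refresh a secret with force enabled
      oc_secret:
        namespace: default
        name: registry-certificates
        files:
          - name: registry.crt
            path: /etc/origin/master/registry.crt
        force: True
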
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 308f45488..769b75e15 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -1124,10 +1124,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1137,34 +1133,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 68c1fc51c..446987eff 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1063,10 +1063,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1076,34 +1072,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index ebc5bf8a2..c7eb1986a 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1063,10 +1063,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1076,34 +1072,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index d1a20fddc..3a98693b7 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -1123,10 +1123,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1136,34 +1132,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 548c9d8e0..939261526 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -1035,10 +1035,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1048,34 +1044,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 3826cd8e5..41e7d0ab8 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -1112,10 +1112,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1125,34 +1121,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 6f0da3d5c..5e423bea9 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -33,7 +33,12 @@ class OCObject(OpenShiftCLI):
def delete(self):
'''delete the object'''
- return self._delete(self.kind, name=self.name, selector=self.selector)
+ results = self._delete(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
+ results['returncode'] = 0
+
+ return results
def create(self, files=None, content=None):
'''
@@ -117,7 +122,8 @@ class OCObject(OpenShiftCLI):
if state == 'absent':
# verify its not in our results
if (params['name'] is not None or params['selector'] is not None) and \
- (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
+ (len(api_rval['results']) == 0 or \
+ ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
return {'changed': False, 'state': state}
if check_mode:
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index ee83580df..4ee6443e9 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -44,7 +44,7 @@ class OCSecret(OpenShiftCLI):
'''delete a secret by name'''
return self._delete('secrets', self.name)
- def create(self, files=None, contents=None):
+ def create(self, files=None, contents=None, force=False):
'''Create a secret '''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
@@ -53,6 +53,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['secrets', 'new', self.name]
if self.type is not None:
cmd.append("--type=%s" % (self.type))
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -65,7 +67,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files)
+ secret = self.prep_secret(files, force)
if secret['returncode'] != 0:
return secret
@@ -77,7 +79,7 @@ class OCSecret(OpenShiftCLI):
return self._replace(sfile_path, force=force)
- def prep_secret(self, files=None, contents=None):
+ def prep_secret(self, files=None, contents=None, force=False):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
@@ -88,6 +90,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['-ojson', 'secrets', 'new', self.name]
if self.type is not None:
cmd.extend(["--type=%s" % (self.type)])
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@@ -150,7 +154,7 @@ class OCSecret(OpenShiftCLI):
return {'changed': True,
'msg': 'Would have performed a create.'}
- api_rval = ocsecret.create(files, params['contents'])
+ api_rval = ocsecret.create(files, params['contents'], force=params['force'])
# Remove files
if files and params['delete_after']:
@@ -167,7 +171,7 @@ class OCSecret(OpenShiftCLI):
########
# Update
########
- secret = ocsecret.prep_secret(params['files'], params['contents'])
+ secret = ocsecret.prep_secret(params['files'], params['contents'], force=params['force'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
diff --git a/roles/lib_openshift/src/doc/obj b/roles/lib_openshift/src/doc/obj
index 4ff912b2d..c6504ed01 100644
--- a/roles/lib_openshift/src/doc/obj
+++ b/roles/lib_openshift/src/doc/obj
@@ -39,9 +39,9 @@ options:
required: false
default: str
aliases: []
- all_namespace:
+ all_namespaces:
description:
- - The namespace where the object lives.
+ - Search in all namespaces for the object.
required: false
default: false
aliases: []
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index b3f01008b..16770b22d 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -273,10 +273,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -286,34 +282,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/src/test/unit/test_oc_secret.py b/roles/lib_openshift/src/test/unit/test_oc_secret.py
index 09cc4a374..323b3423c 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_secret.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_secret.py
@@ -48,6 +48,7 @@ class OCSecretTest(unittest.TestCase):
'debug': False,
'files': None,
'delete_after': True,
+ 'force': False,
}
# Return values of our mocked function call. These get returned once per call.
diff --git a/roles/openshift_cfme/README.md b/roles/openshift_cfme/README.md
new file mode 100644
index 000000000..8283afed6
--- /dev/null
+++ b/roles/openshift_cfme/README.md
@@ -0,0 +1,404 @@
+# OpenShift-Ansible - CFME Role
+
+# PROOF OF CONCEPT - Alpha Version
+
+This role is based on the work in the upstream
+[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods)
+project. For additional literature on configuration specific to
+ManageIQ (optional post-installation tasks), visit the project's
+[upstream documentation page](http://manageiq.org/docs/get-started/basic-configuration).
+
+Please submit a
+[new issue](https://github.com/openshift/openshift-ansible/issues/new)
+if you run into bugs with this role or wish to request enhancements.
+
+# Important Notes
+
+This is an early *proof of concept* role to install the CloudForms
+Management Engine (ManageIQ) on OpenShift Container Platform (OCP).
+
+* This role is still in **ALPHA STATUS**
+* Many options are hard-coded still (ex: NFS setup)
+* Not many configurable options yet
+* **Should** be run on a dedicated cluster
+* **Will not run** on undersized infra
+* The terms *CFME* and *MIQ* / *ManageIQ* are interchangeable
+
+## Requirements
+
+**NOTE:** These requirements are copied from the upstream
+[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods)
+project.
+
+### Prerequisites:
+
+* [OpenShift Origin 1.5](https://docs.openshift.com/container-platform/3.5/welcome/index.html)
+  or [higher](https://docs.openshift.com/container-platform/latest/welcome/index.html)
+  provisioned
+* NFS or other compatible volume provider
+* A cluster-admin user (created by role if required)
+
+### Cluster Sizing
+
+In order to avoid random deployment failures due to resource
+starvation, we recommend a minimum cluster size for a **test**
+environment.
+
+| Type | Size | CPUs | Memory |
+|----------------|---------|----------|----------|
+| Masters | `1+` | `8` | `12GB` |
+| Nodes | `2+` | `4` | `8GB` |
+| PV Storage | `25GB` | `N/A` | `N/A` |
+
+
+![Basic CFME Deployment](img/CFMEBasicDeployment.png)
+
+**CFME has hard requirements for memory. CFME will NOT install if your
+ infrastructure does not meet or exceed the requirements given
+ above. Do not run this playbook if you do not have the required
+ memory; you will just waste your time.**
+
+
+### Other sizing considerations
+
+* Recommendations assume MIQ will be the **only application running**
+ on this cluster.
+* Alternatively, you can provision an infrastructure node to run
+ registry/metrics/router/logging pods.
+* Each MIQ application pod will consume at least `3GB` of RAM on initial
+ deployment (blank deployment without providers).
+* RAM consumption will ramp up depending on appliance use; once
+  providers are added, expect higher resource consumption.
+
+
+### Assumptions
+
+1) You meet/exceed the [cluster sizing](#cluster-sizing) requirements
+1) Your NFS server is on your master host
+1) Your PV backing NFS storage volume is mounted on `/exports/`
+
+Required directories that NFS will export to back the PVs:
+
+* `/exports/miq-pv0[123]`
+
+If the required directories are not present at install-time, they will
+be created using the recommended permissions per the
+[upstream documentation](https://github.com/ManageIQ/manageiq-pods#make-persistent-volumes-to-host-the-miq-database-and-application-data):
+
+* UID/GID: `root`/`root`
+* Mode: `0775`
+
+**IMPORTANT:** If you are using a separate volume (`/dev/vdX`) for NFS
+ storage, **ensure** it is mounted on `/exports/` **before** running
+ this role.
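+
+If you prefer to create the export directories yourself, the
+following is a minimal sketch, run as root on the NFS host and
+assuming the default `miq-pv01` through `miq-pv03` names:
+
+```
+mkdir -p /exports/miq-pv01 /exports/miq-pv02 /exports/miq-pv03
+chown root:root /exports/miq-pv0*
+chmod 0775 /exports/miq-pv0*
+```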
+
+
+
+## Role Variables
+
+Core variables in this role:
+
+| Name | Default value | Description |
+|-------------------------------|---------------|---------------|
+| `openshift_cfme_install_app` | `False` | `True`: Install everything and create a new CFME app, `False`: Just install all of the templates and scaffolding |
+
+
+Variables you may override have defaults defined in
+[defaults/main.yml](defaults/main.yml).
+
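+For example (illustrative only), any of those defaults can also be
+overridden with `-e` on the `ansible-playbook` command line; the
+project name `cfme-test` below is just a made-up example:
+
+```
+$ ansible-playbook -v -i <INVENTORY_FILE> \
+    playbooks/byo/openshift-cfme/config.yml \
+    -e openshift_cfme_project=cfme-test
+```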
+
+# Important Notes
+
+This role is presently in **tech preview** status. Use it with the same
+caution you would give any other pre-release software.
+
+**Most importantly**, follow this one rule: don't re-run the entrypoint
+playbook multiple times in a row without cleaning up after previous
+runs if some of the CFME steps have run. This is a known
+flake. Cleanup instructions are provided at the bottom of this README.
+
+
+# Usage
+
+This section describes the basic usage of this role. All parameters
+will use their [default values](defaults/main.yml).
+
+## Pre-flight Checks
+
+**IMPORTANT:** As documented above in [the prerequisites](#prerequisites),
+ you **must already** have your OCP cluster up and running.
+
+**Optional:** The ManageIQ pod is fairly large (about 1.7 GB), so to
+save some spin-up time post-deployment, you can begin pre-pulling the
+docker image to each of your nodes now:
+
+```
+root@node0x # docker pull docker.io/manageiq/manageiq-pods:app-latest-fine
+```
+
+## Getting Started
+
+1) The *entry point playbook* to install CFME is located in
+[the BYO playbooks](../../playbooks/byo/openshift-cfme/config.yml)
+directory.
+
+2) Update your existing `hosts` inventory file and ensure the
+parameter `openshift_cfme_install_app` is set to `True` under the
+`[OSEv3:vars]` block (see the sample inventory fragment below).
+
+3) Using your existing `hosts` inventory file, run `ansible-playbook`
+with the entry point playbook:
+
+```
+$ ansible-playbook -v -i <INVENTORY_FILE> playbooks/byo/openshift-cfme/config.yml
+```
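+
+For reference, a minimal sketch of the inventory fragment from step 2
+(your real `hosts` file will already contain the rest of the
+`[OSEv3:vars]` block and your host groups):
+
+```
+[OSEv3:vars]
+# ... your existing cluster variables ...
+openshift_cfme_install_app=True
+```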
+
+## Next Steps
+
+Once complete, the playbook will let you know:
+
+
+```
+TASK [openshift_cfme : Status update] *********************************************************
+ok: [ho.st.na.me] => {
+ "msg": "CFME has been deployed. Note that there will be a delay before it is fully initialized.\n"
+}
+```
+
+This will take several minutes (*possibly 10 or more*, depending on
+your network connection). However, you can get some insight into the
+deployment process during initialization.
+
+### oc describe pod manageiq-0
+
+*Some useful information about the output you will see if you run the
+`oc describe pod manageiq-0` command*
+
+**Readiness probes** - These will take a while to become
+`Healthy`. The initial health probes won't even happen for at least 8
+minutes depending on how long it takes you to pull down the large
+images. ManageIQ is a large application so it may take a considerable
+amount of time for it to deploy and be marked as `Healthy`.
+
+If you go to the node you know the application is running on (check
+for `Successfully assigned manageiq-0 to <HOST|IP>` in the `describe`
+output), you can run a `docker pull` command to monitor the progress of
+the image pull:
+
+```
+[root@cfme-node ~]# docker pull docker.io/manageiq/manageiq-pods:app-latest-fine
+Trying to pull repository docker.io/manageiq/manageiq-pods ...
+sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a: Pulling from docker.io/manageiq/manageiq-pods
+Digest: sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a
+Status: Image is up to date for docker.io/manageiq/manageiq-pods:app-latest-fine
+```
+
+The example above demonstrates the case where the image has been
+successfully pulled already.
+
+If the image isn't completely pulled already then you will see
+multiple progress bars detailing each image layer download status.
+
+
+### rsh
+
+*Useful inspection/progress monitoring techniques with the `oc rsh`
+command.*
+
+
+On your master node, switch to the `cfme` project (or whatever you
+named it if you overrode the `openshift_cfme_project` variable) and
+check on the pod states:
+
+```
+[root@cfme-master01 ~]# oc project cfme
+Now using project "cfme" on server "https://10.10.0.100:8443".
+
+[root@cfme-master01 ~]# oc get pod
+NAME READY STATUS RESTARTS AGE
+manageiq-0 0/1 Running 0 14m
+memcached-1-3lk7g 1/1 Running 0 14m
+postgresql-1-12slb 1/1 Running 0 14m
+```
+
+Note how the `manageiq-0` pod says `0/1` under the **READY**
+column. After some time (depending on your network connection) you'll
+be able to `rsh` into the pod to find out more of what's happening in
+real time. First, the easy-mode command: run this once `rsh` is
+available, then watch until it says `Started Initialize Appliance
+Database`:
+
+```
+[root@cfme-master01 ~]# oc rsh manageiq-0 journalctl -f -u appliance-initialize.service
+```
+
+For the full explanation of what this means, and more interactive
+inspection techniques, keep reading on.
+
+To obtain a shell on our `manageiq` pod we use this command:
+
+```
+[root@cfme-master01 ~]# oc rsh manageiq-0 bash -l
+```
+
+The `rsh` command opens a shell in your pod for you. In this case it's
+the pod called `manageiq-0`. `systemd` is managing the services in
+this pod so we can use the `list-units` command to see what is running
+currently: `# systemctl list-units | grep appliance`.
+
+If you see the `appliance-initialize` service running, this indicates
+that basic setup is still in progress. We can monitor the process with
+the `journalctl` command like so:
+
+
+```
+[root@manageiq-0 vmdb]# journalctl -f -u appliance-initialize.service
+Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking deployment status ==
+Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: No pre-existing EVM configuration found on region PV
+Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking for existing data on server PV ==
+Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Starting New Deployment ==
+Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Applying memcached config ==
+Jun 14 14:55:53 manageiq-0 appliance-initialize.sh[58]: == Initializing Appliance ==
+Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: create encryption key
+Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: configuring external database
+Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: Checking for connections to the database...
+Jun 14 14:56:09 manageiq-0 appliance-initialize.sh[58]: Create region starting
+Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: Create region complete
+Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data ==
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data backup ==
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sending incremental file list
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: created directory /persistent/server-deploy/backup/backup_2017_06_14_145816
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/REGION
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/v2_key
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/database.yml
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/GUID
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sent 1330 bytes received 136 bytes 2932.00 bytes/sec
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: total size is 770 speedup is 0.53
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Restoring PV data symlinks ==
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/REGION symlink is already in place, skipping
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/config/database.yml symlink is already in place, skipping
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/certs/v2_key symlink is already in place, skipping
+Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/log symlink is already in place, skipping
+Jun 14 14:58:28 manageiq-0 systemctl[304]: Removed symlink /etc/systemd/system/multi-user.target.wants/appliance-initialize.service.
+Jun 14 14:58:29 manageiq-0 systemd[1]: Started Initialize Appliance Database.
+```
+
+Most of what we see here (above) is the initial database seeding
+process. This process isn't very quick, so be patient.
+
+At the bottom of the log there is a special line from `systemctl`:
+`Removed symlink
+/etc/systemd/system/multi-user.target.wants/appliance-initialize.service`. The
+`appliance-initialize` service is no longer marked as enabled, which
+indicates that the base application initialization is now complete.
+
+We're not done yet, though; there are other ancillary services which
+run in this pod to support the application. *Still in the rsh shell*,
+use the `ps` command to monitor for the `httpd` processes
+starting. You will see output similar to the following when that stage
+has completed:
+
+```
+[root@manageiq-0 vmdb]# ps aux | grep http
+root 1941 0.0 0.1 249820 7640 ? Ss 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
+apache 1942 0.0 0.0 250752 6012 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
+apache 1943 0.0 0.0 250472 5952 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
+apache 1944 0.0 0.0 250472 5916 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
+apache 1945 0.0 0.0 250360 5764 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
+```
+
+Furthermore, you can find other related processes by just looking for
+ones with `MIQ` in their name:
+
+```
+[root@manageiq-0 vmdb]# ps aux | grep miq
+root 333 27.7 4.2 555884 315916 ? Sl 14:58 3:59 MIQ Server
+root 1976 0.6 4.0 507224 303740 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 1, queue: generic
+root 1984 0.6 4.0 507224 304312 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 2, queue: generic
+root 1992 0.9 4.0 508252 304888 ? SNl 15:02 0:05 MIQ: MiqPriorityWorker id: 3, queue: generic
+root 2000 0.7 4.0 510308 304696 ? SNl 15:02 0:04 MIQ: MiqPriorityWorker id: 4, queue: generic
+root 2008 1.2 4.0 514000 303612 ? SNl 15:02 0:07 MIQ: MiqScheduleWorker id: 5
+root 2026 0.2 4.0 517504 303644 ? SNl 15:02 0:01 MIQ: MiqEventHandler id: 6, queue: ems
+root 2036 0.2 4.0 518532 303768 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 7, queue: reporting
+root 2044 0.2 4.0 519560 303812 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 8, queue: reporting
+root 2059 0.2 4.0 528372 303956 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:5000) [MIQ: Web Server Worker]
+root 2067 0.9 4.0 529664 305716 ? SNl 15:02 0:05 puma 3.3.0 (tcp://127.0.0.1:3000) [MIQ: Web Server Worker]
+root 2075 0.2 4.0 529408 304056 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:4000) [MIQ: Web Server Worker]
+root 2329 0.0 0.0 10640 972 ? S+ 15:13 0:00 grep --color=auto -i miq
+```
+
+Finally, *still in the rsh shell*, to test if the application is
+running correctly, we can request the application homepage. If the
+page is available, the page title will be `ManageIQ: Login`:
+
+```
+[root@manageiq-0 vmdb]# curl -s -k https://localhost | grep -A2 '<title>'
+<title>
+ManageIQ: Login
+</title>
+```
+
+**Note:** The `-s` flag makes `curl` operations silent, and the `-k`
+flag tells it to ignore errors about untrusted certificates.
+
+
+
+# Additional Upstream Resources
+
+Below are some useful resources from the upstream project
+documentation. You may find these of value.
+
+* [Verify Setup Was Successful](https://github.com/ManageIQ/manageiq-pods#verifying-the-setup-was-successful)
+* [POD Access And Routes](https://github.com/ManageIQ/manageiq-pods#pod-access-and-routes)
+* [Troubleshooting](https://github.com/ManageIQ/manageiq-pods#troubleshooting)
+
+
+# Manual Cleanup
+
+At this time uninstallation/cleanup is still a manual process. You
+will have to follow a few steps to fully remove CFME from your
+cluster.
+
+Delete the project:
+
+* `oc delete project cfme`
+
+Delete the PVs:
+
+* `oc delete pv miq-pv01`
+* `oc delete pv miq-pv02`
+* `oc delete pv miq-pv03`
+
+Clean out the old PV data:
+
+* `cd /exports/`
+* `find miq* -type f -delete`
+* `find miq* -type d -delete`
+
+Remove the NFS exports:
+
+* `rm /etc/exports.d/openshift_cfme.exports`
+* `exportfs -ar`
+
+Delete the user:
+
+* `oc delete user cfme`
+
+**NOTE:** The `oc delete project cfme` command will return quickly;
+however, it will continue to operate in the background. Continue
+running `oc get project` after you've completed the other steps to
+monitor the pods and the final project termination progress.
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
new file mode 100644
index 000000000..493e1ef68
--- /dev/null
+++ b/roles/openshift_cfme/defaults/main.yml
@@ -0,0 +1,38 @@
+---
+# Namespace for the CFME project
+openshift_cfme_project: cfme
+# Namespace/project description
+openshift_cfme_project_description: ManageIQ - CloudForms Management Engine
+# Basic user assigned the `admin` role for the project
+openshift_cfme_user: cfme
+# Project system account for enabling privileged pods
+openshift_cfme_service_account: "system:serviceaccount:{{ openshift_cfme_project }}:default"
+# All the required exports
+openshift_cfme_pv_exports:
+ - miq-pv01
+ - miq-pv02
+ - miq-pv03
+# PV template files and their created object names
+openshift_cfme_pv_data:
+ - pv_name: miq-pv01
+ pv_template: miq-pv-db.yaml
+ pv_label: CFME DB PV
+ - pv_name: miq-pv02
+ pv_template: miq-pv-region.yaml
+ pv_label: CFME Region PV
+ - pv_name: miq-pv03
+ pv_template: miq-pv-server.yaml
+ pv_label: CFME Server PV
+
+# Tuning parameter to use more than 5 images at once from an ImageStream
+openshift_cfme_maxImagesBulkImportedPerRepository: 100
+# Hostname/IP of the NFS server. Currently defaults to first master
+openshift_cfme_nfs_server: "{{ groups.nfs.0 }}"
+# TODO: Refactor '_install_app' variable. This is just for testing but
+# maybe in the future it should control the entire yes/no for CFME.
+#
+# Whether or not the manageiq app should be initialized ('oc new-app
+# --template=manageiq'). If False, everything UP TO 'new-app' is run.
+openshift_cfme_install_app: False
+# Docker image to pull
+openshift_cfme_container_image: "docker.io/manageiq/manageiq-pods:app-latest-fine"
diff --git a/roles/openshift_cfme/files/miq-template.yaml b/roles/openshift_cfme/files/miq-template.yaml
new file mode 100644
index 000000000..8f0d2af38
--- /dev/null
+++ b/roles/openshift_cfme/files/miq-template.yaml
@@ -0,0 +1,566 @@
+---
+path: /tmp/miq-template-out
+data:
+ apiVersion: v1
+ kind: Template
+ labels:
+ template: manageiq
+ metadata:
+ name: manageiq
+ annotations:
+ description: "ManageIQ appliance with persistent storage"
+ tags: "instant-app,manageiq,miq"
+ iconClass: "icon-rails"
+ objects:
+ - apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ - apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: "Exposes and load balances ManageIQ pods"
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: ${NAME}
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: ${NAME}
+ - apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${NAME}
+ spec:
+ host: ${APPLICATION_DOMAIN}
+ port:
+ targetPort: https
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: ${NAME}
+ - apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: miq-app
+ annotations:
+ description: "Keeps track of the ManageIQ image changes"
+ spec:
+ dockerImageRepository: "${APPLICATION_IMG_NAME}"
+ - apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: miq-postgresql
+ annotations:
+ description: "Keeps track of the PostgreSQL image changes"
+ spec:
+ dockerImageRepository: "${POSTGRESQL_IMG_NAME}"
+ - apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: miq-memcached
+ annotations:
+ description: "Keeps track of the Memcached image changes"
+ spec:
+ dockerImageRepository: "${MEMCACHED_IMG_NAME}"
+ - apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: "${NAME}-${DATABASE_SERVICE_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${DATABASE_VOLUME_CAPACITY}
+ - apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: "${NAME}-region"
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${APPLICATION_REGION_VOLUME_CAPACITY}
+ - apiVersion: apps/v1beta1
+ kind: "StatefulSet"
+ metadata:
+ name: ${NAME}
+ annotations:
+ description: "Defines how to deploy the ManageIQ appliance"
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: ${NAME}
+ name: ${NAME}
+ spec:
+ containers:
+ - name: manageiq
+ image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ volumeMounts:
+ -
+ name: "${NAME}-server"
+ mountPath: "/persistent"
+ -
+ name: "${NAME}-region"
+ mountPath: "/persistent-region"
+ env:
+ -
+ name: "APPLICATION_INIT_DELAY"
+ value: "${APPLICATION_INIT_DELAY}"
+ -
+ name: "DATABASE_SERVICE_NAME"
+ value: "${DATABASE_SERVICE_NAME}"
+ -
+ name: "DATABASE_REGION"
+ value: "${DATABASE_REGION}"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ value: "${MEMCACHED_SERVICE_NAME}"
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: "pg-password"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/manageiq/container-scripts/sync-pv-data
+ volumes:
+ -
+ name: "${NAME}-region"
+ persistentVolumeClaim:
+ claimName: ${NAME}-region
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ # Uncomment this if using dynamic volume provisioning.
+ # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html
+ # volume.alpha.kubernetes.io/storage-class: anything
+ spec:
+ accessModes: [ ReadWriteOnce ]
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+ - apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the memcached server"
+ spec:
+ ports:
+ -
+ name: "memcached"
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ - apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy memcached"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: true
+ containerNames:
+ - "memcached"
+ from:
+ kind: "ImageStreamTag"
+ name: "miq-memcached:${MEMCACHED_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ -
+ name: "memcached"
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ -
+ containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ value: "${MEMCACHED_MAX_MEMORY}"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+ - apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the database server"
+ spec:
+ ports:
+ -
+ name: "postgresql"
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ - apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy the database"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: true
+ containerNames:
+ - "postgresql"
+ from:
+ kind: "ImageStreamTag"
+ name: "miq-postgresql:${POSTGRESQL_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ -
+ name: "miq-pgdb-volume"
+ persistentVolumeClaim:
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
+ containers:
+ -
+ name: "postgresql"
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
+ ports:
+ -
+ containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ -
+ name: "miq-pgdb-volume"
+ mountPath: "/var/lib/pgsql/data"
+ env:
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: "pg-password"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
+ limits:
+ memory: "${POSTGRESQL_MEM_LIMIT}"
+
+ parameters:
+ -
+ name: "NAME"
+ displayName: Name
+ required: true
+ description: "The name assigned to all of the frontend objects defined in this template."
+ value: manageiq
+ -
+ name: "DATABASE_SERVICE_NAME"
+ displayName: "PostgreSQL Service Name"
+ required: true
+ description: "The name of the OpenShift Service exposed for the PostgreSQL container."
+ value: "postgresql"
+ -
+ name: "DATABASE_USER"
+ displayName: "PostgreSQL User"
+ required: true
+ description: "PostgreSQL user that will access the database."
+ value: "root"
+ -
+ name: "DATABASE_PASSWORD"
+ displayName: "PostgreSQL Password"
+ required: true
+ description: "Password for the PostgreSQL user."
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+ -
+ name: "DATABASE_NAME"
+ required: true
+ displayName: "PostgreSQL Database Name"
+ description: "Name of the PostgreSQL database accessed."
+ value: "vmdb_production"
+ -
+ name: "DATABASE_REGION"
+ required: true
+ displayName: "Application Database Region"
+ description: "Database region that will be used for application."
+ value: "0"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ required: true
+ displayName: "Memcached Service Name"
+ description: "The name of the OpenShift Service exposed for the Memcached container."
+ value: "memcached"
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ displayName: "Memcached Max Memory"
+ description: "Memcached maximum memory for memcached object storage in MB."
+ value: "64"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ displayName: "Memcached Max Connections"
+ description: "Memcached maximum number of connections allowed."
+ value: "1024"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ displayName: "Memcached Slab Page Size"
+ description: "Memcached size of each slab page."
+ value: "1m"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ displayName: "PostgreSQL Max Connections"
+ description: "PostgreSQL maximum number of database connections allowed."
+ value: "100"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ displayName: "PostgreSQL Shared Buffer Amount"
+ description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
+ value: "256MB"
+ -
+ name: "APPLICATION_CPU_REQ"
+ displayName: "Application Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Application container will need (expressed in millicores)."
+ value: "1000m"
+ -
+ name: "POSTGRESQL_CPU_REQ"
+ displayName: "PostgreSQL Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)."
+ value: "500m"
+ -
+ name: "MEMCACHED_CPU_REQ"
+ displayName: "Memcached Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)."
+ value: "200m"
+ -
+ name: "APPLICATION_MEM_REQ"
+ displayName: "Application Min RAM Requested"
+ required: true
+ description: "Minimum amount of memory the Application container will need."
+ value: "6144Mi"
+ -
+ name: "POSTGRESQL_MEM_REQ"
+ displayName: "PostgreSQL Min RAM Requested"
+ required: true
+ description: "Minimum amount of memory the PostgreSQL container will need."
+ value: "1024Mi"
+ -
+ name: "MEMCACHED_MEM_REQ"
+ displayName: "Memcached Min RAM Requested"
+ required: true
+ description: "Minimum amount of memory the Memcached container will need."
+ value: "64Mi"
+ -
+ name: "APPLICATION_MEM_LIMIT"
+ displayName: "Application Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the Application container can consume."
+ value: "16384Mi"
+ -
+ name: "POSTGRESQL_MEM_LIMIT"
+ displayName: "PostgreSQL Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can consume."
+ value: "8192Mi"
+ -
+ name: "MEMCACHED_MEM_LIMIT"
+ displayName: "Memcached Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can consume."
+ value: "256Mi"
+ -
+ name: "POSTGRESQL_IMG_NAME"
+ displayName: "PostgreSQL Image Name"
+ description: "This is the PostgreSQL image name requested to deploy."
+ value: "docker.io/manageiq/manageiq-pods"
+ -
+ name: "POSTGRESQL_IMG_TAG"
+ displayName: "PostgreSQL Image Tag"
+ description: "This is the PostgreSQL image tag/version requested to deploy."
+ value: "postgresql-latest-fine"
+ -
+ name: "MEMCACHED_IMG_NAME"
+ displayName: "Memcached Image Name"
+ description: "This is the Memcached image name requested to deploy."
+ value: "docker.io/manageiq/manageiq-pods"
+ -
+ name: "MEMCACHED_IMG_TAG"
+ displayName: "Memcached Image Tag"
+ description: "This is the Memcached image tag/version requested to deploy."
+ value: "memcached-latest-fine"
+ -
+ name: "APPLICATION_IMG_NAME"
+ displayName: "Application Image Name"
+ description: "This is the Application image name requested to deploy."
+ value: "docker.io/manageiq/manageiq-pods"
+ -
+ name: "APPLICATION_IMG_TAG"
+ displayName: "Application Image Tag"
+ description: "This is the Application image tag/version requested to deploy."
+ value: "app-latest-fine"
+ -
+ name: "APPLICATION_DOMAIN"
+ displayName: "Application Hostname"
+ description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
+ value: ""
+ -
+ name: "APPLICATION_REPLICA_COUNT"
+ displayName: "Application Replica Count"
+ description: "This is the number of Application replicas requested to deploy."
+ value: "1"
+ -
+ name: "APPLICATION_INIT_DELAY"
+ displayName: "Application Init Delay"
+ required: true
+ description: "Delay in seconds before we attempt to initialize the application."
+ value: "15"
+ -
+ name: "APPLICATION_VOLUME_CAPACITY"
+ displayName: "Application Volume Capacity"
+ required: true
+ description: "Volume space available for application data."
+ value: "5Gi"
+ -
+ name: "APPLICATION_REGION_VOLUME_CAPACITY"
+ displayName: "Application Region Volume Capacity"
+ required: true
+ description: "Volume space available for region application data."
+ value: "5Gi"
+ -
+ name: "DATABASE_VOLUME_CAPACITY"
+ displayName: "Database Volume Capacity"
+ required: true
+ description: "Volume space available for database."
+ value: "15Gi"
diff --git a/roles/openshift_cfme/files/openshift_cfme.exports b/roles/openshift_cfme/files/openshift_cfme.exports
new file mode 100644
index 000000000..5457d41fc
--- /dev/null
+++ b/roles/openshift_cfme/files/openshift_cfme.exports
@@ -0,0 +1,3 @@
+/exports/miq-pv01 *(rw,no_root_squash,no_wdelay)
+/exports/miq-pv02 *(rw,no_root_squash,no_wdelay)
+/exports/miq-pv03 *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_cfme/handlers/main.yml b/roles/openshift_cfme/handlers/main.yml
new file mode 100644
index 000000000..476a5e030
--- /dev/null
+++ b/roles/openshift_cfme/handlers/main.yml
@@ -0,0 +1,42 @@
+---
+######################################################################
+# NOTE: These are duplicated from roles/openshift_master/handlers/main.yml
+#
+# TODO: Use the consolidated 'openshift_handlers' role once it's ready
+# See: https://github.com/openshift/openshift-ansible/pull/4041#discussion_r118770782
+######################################################################
+
+- name: restart master
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
+ when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
+ notify: Verify API Server
+
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
diff --git a/roles/openshift_cfme/img/CFMEBasicDeployment.png b/roles/openshift_cfme/img/CFMEBasicDeployment.png
new file mode 100644
index 000000000..a89c1e325
--- /dev/null
+++ b/roles/openshift_cfme/img/CFMEBasicDeployment.png
Binary files differ
diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_cfme/meta/main.yml
new file mode 100644
index 000000000..9200f2c3c
--- /dev/null
+++ b/roles/openshift_cfme/meta/main.yml
@@ -0,0 +1,20 @@
+---
+galaxy_info:
+ author: Tim Bielawa
+ description: OpenShift CFME (Manage IQ) Deployer
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ version: 1.0
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- role: lib_openshift
+- role: lib_utils
+- role: openshift_common
+- role: openshift_master_facts
diff --git a/roles/openshift_cfme/tasks/create_pvs.yml b/roles/openshift_cfme/tasks/create_pvs.yml
new file mode 100644
index 000000000..7fa7d3997
--- /dev/null
+++ b/roles/openshift_cfme/tasks/create_pvs.yml
@@ -0,0 +1,36 @@
+---
+# Check for existence and then conditionally:
+# - evaluate templates
+# - create PVs
+#
+# These tasks idempotently create required CFME PV objects. Do not
+# call this file directly. This file is intended to be run as an
+# include that has a 'with_items' attached to it. Hence the use below
+# of variables like "{{ item.pv_label }}"
+
+- name: "Check if the {{ item.pv_label }} template has been created already"
+ oc_obj:
+ namespace: "{{ openshift_cfme_project }}"
+ state: list
+ kind: pv
+ name: "{{ item.pv_name }}"
+ register: miq_pv_check
+
+# Skip all of this if the PV already exists
+- block:
+ - name: "Ensure the {{ item.pv_label }} template is evaluated"
+ template:
+ src: "{{ item.pv_template }}.j2"
+ dest: "{{ template_dir }}/{{ item.pv_template }}"
+
+ - name: "Ensure {{ item.pv_label }} is created"
+ oc_obj:
+ namespace: "{{ openshift_cfme_project }}"
+ kind: pv
+ name: "{{ item.pv_name }}"
+ state: present
+ delete_after: True
+ files:
+ - "{{ template_dir }}/{{ item.pv_template }}"
+ when:
+ - not miq_pv_check.results.results.0
diff --git a/roles/openshift_cfme/tasks/main.yml b/roles/openshift_cfme/tasks/main.yml
new file mode 100644
index 000000000..acbce7232
--- /dev/null
+++ b/roles/openshift_cfme/tasks/main.yml
@@ -0,0 +1,148 @@
+---
+######################################################################
+# Users, projects, and privileges
+
+- name: Ensure the CFME user exists
+ oc_user:
+ state: present
+ username: "{{ openshift_cfme_user }}"
+
+- name: Ensure the CFME namespace exists with CFME user as admin
+ oc_project:
+ state: present
+ name: "{{ openshift_cfme_project }}"
+ display_name: "{{ openshift_cfme_project_description }}"
+ admin: "{{ openshift_cfme_user }}"
+
+- name: Ensure the CFME namespace service account is privileged
+ oc_adm_policy_user:
+ namespace: "{{ openshift_cfme_project }}"
+ user: "{{ openshift_cfme_service_account }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+
+######################################################################
+# NFS
+
+- name: Ensure the /exports/ directory exists
+ file:
+ path: /exports/
+ state: directory
+ mode: 0755
+ owner: root
+ group: root
+
+- name: Ensure the miq-pv0X export directories exist
+ file:
+ path: "/exports/{{ item }}"
+ state: directory
+ mode: 0775
+ owner: root
+ group: root
+ with_items: "{{ openshift_cfme_pv_exports }}"
+
+- name: Ensure the NFS exports for CFME PVs exist
+ copy:
+ src: openshift_cfme.exports
+ dest: /etc/exports.d/openshift_cfme.exports
+ register: nfs_exports_updated
+
+- name: Ensure the NFS export table is refreshed if exports were added
+ command: exportfs -ar
+ when:
+ - nfs_exports_updated.changed
+
+
+######################################################################
+# Create the required CFME PVs. Check out these online docs if you
+# need a refresher on includes looping with items:
+# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
+# * http://stackoverflow.com/a/35128533
+#
+# TODO: Handle the case where a PV template is updated in
+# openshift-ansible and the change needs to be landed on the managed
+# cluster.
+
+- include: create_pvs.yml
+ with_items: "{{ openshift_cfme_pv_data }}"
+
+######################################################################
+# CFME App Template
+#
+# Note, this is different from the create_pvs.yml tasks in that the
+# application template does not require any jinja2 evaluation.
+#
+# TODO: Handle the case where the server template is updated in
+# openshift-ansible and the change needs to be landed on the managed
+# cluster.
+
+- name: Check if the CFME Server template has been created already
+ oc_obj:
+ namespace: "{{ openshift_cfme_project }}"
+ state: list
+ kind: template
+ name: manageiq
+ register: miq_server_check
+
+- name: Copy over CFME Server template
+ copy:
+ src: miq-template.yaml
+ dest: "{{ template_dir }}/miq-template.yaml"
+
+- name: Ensure the server template was read from disk
+  debug:
+    var: r_openshift_cfme_miq_template_content
+
+- name: Ensure CFME Server Template exists
+ oc_obj:
+ namespace: "{{ openshift_cfme_project }}"
+ kind: template
+ name: "manageiq"
+ state: present
+ content: "{{ r_openshift_cfme_miq_template_content }}"
+
+######################################################################
+# Let's do this
+
+- name: Ensure the CFME Server is created
+ oc_process:
+ namespace: "{{ openshift_cfme_project }}"
+ template_name: manageiq
+ create: True
+ register: cfme_new_app_process
+ run_once: True
+ when:
+ # User said to install CFME in their inventory
+ - openshift_cfme_install_app | bool
+ # # The server app doesn't exist already
+ # - not miq_server_check.results.results.0
+
+- debug:
+ var: cfme_new_app_process
+
+######################################################################
+# Various cleanup steps
+
+# TODO: Not sure what to do about this right now. Might be able to
+# just delete it? This currently warns about "Unable to find
+# '<TEMP_DIR>' in expected paths."
+- name: Ensure the temporary PV/App templates are erased
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_fileglob:
+ - "{{ template_dir }}/*.yaml"
+
+- name: Ensure the temporary PV/app template directory is erased
+ file:
+ path: "{{ template_dir }}"
+ state: absent
+
+######################################################################
+
+- name: Status update
+ debug:
+ msg: >
+ CFME has been deployed. Note that there will be a delay before
+ it is fully initialized.
diff --git a/roles/openshift_cfme/tasks/tune_masters.yml b/roles/openshift_cfme/tasks/tune_masters.yml
new file mode 100644
index 000000000..02b0f10bf
--- /dev/null
+++ b/roles/openshift_cfme/tasks/tune_masters.yml
@@ -0,0 +1,12 @@
+---
+- name: Ensure bulk image import limit is tuned
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: 'imagePolicyConfig.maxImagesBulkImportedPerRepository'
+ value: "{{ openshift_cfme_maxImagesBulkImportedPerRepository | int() }}"
+ state: present
+ backup: True
+ notify:
+ - restart master
+
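+# flush_handlers runs the notified "restart master" handler immediately,
+# rather than waiting for the end of the play.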
+- meta: flush_handlers
diff --git a/roles/openshift_cfme/tasks/uninstall.yml b/roles/openshift_cfme/tasks/uninstall.yml
new file mode 100644
index 000000000..cba734a0e
--- /dev/null
+++ b/roles/openshift_cfme/tasks/uninstall.yml
@@ -0,0 +1,43 @@
+---
+- include_role:
+ name: lib_openshift
+
+- name: Uninstall CFME - ManageIQ
+ debug:
+ msg: Uninstalling Cloudforms Management Engine - ManageIQ
+
+- name: Ensure the CFME project is removed
+ oc_project:
+ state: absent
+ name: "{{ openshift_cfme_project }}"
+
+- name: Ensure the CFME template is removed
+ oc_obj:
+ namespace: "{{ openshift_cfme_project }}"
+ state: absent
+ kind: template
+ name: manageiq
+
+- name: Ensure the CFME PVs are removed
+ oc_obj:
+ state: absent
+ all_namespaces: True
+ kind: pv
+ name: "{{ item }}"
+ with_items: "{{ openshift_cfme_pv_exports }}"
+
+- name: Ensure the CFME user is removed
+ oc_user:
+ state: absent
+ username: "{{ openshift_cfme_user }}"
+
+- name: Ensure the CFME NFS Exports are removed
+ file:
+ path: /etc/exports.d/openshift_cfme.exports
+ state: absent
+ register: nfs_exports_removed
+
+- name: Ensure the NFS export table is refreshed if exports were removed
+ command: exportfs -ar
+ when:
+ - nfs_exports_removed.changed
diff --git a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
new file mode 100644
index 000000000..b8c3bb277
--- /dev/null
+++ b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: miq-pv01
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/miq-pv01
+ server: {{ openshift_cfme_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
new file mode 100644
index 000000000..7218773f0
--- /dev/null
+++ b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: miq-pv02
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/miq-pv02
+ server: {{ openshift_cfme_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
new file mode 100644
index 000000000..7b40b6c69
--- /dev/null
+++ b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: miq-pv03
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/miq-pv03
+ server: {{ openshift_cfme_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_default_storage_class/README.md b/roles/openshift_default_storage_class/README.md
new file mode 100644
index 000000000..198163127
--- /dev/null
+++ b/roles/openshift_default_storage_class/README.md
@@ -0,0 +1,39 @@
+openshift_default_storage_class
+=========
+
+A role that deploys configurations for the OpenShift StorageClass
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+openshift_storageclass_name: Name of the storage class to create
+openshift_storageclass_provisioner: The Kubernetes provisioner to use
+openshift_storageclass_type: Type of storage to use. This differs among clouds/providers
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+- role: openshift_default_storage_class
+ openshift_storageclass_name: awsEBS
+ openshift_storageclass_provisioner: kubernetes.io/aws-ebs
+ openshift_storageclass_type: gp2
+
+
+License
+-------
+
+Apache
+
+Author Information
+------------------
+
+Openshift Operations
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
new file mode 100644
index 000000000..66ffd2a73
--- /dev/null
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+openshift_storageclass_defaults:
+ aws:
+ name: gp2
+ provisioner: kubernetes.io/aws-ebs
+ type: gp2
+ gce:
+ name: standard
+ provisioner: kubernetes.io/gce-pd
+ type: pd-standard
+
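+# These lookups assume openshift_cloudprovider_kind is set to 'aws' or 'gce';
+# for other providers, override the three variables below.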
+openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"
+openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
+openshift_storageclass_type: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['type'] }}"
diff --git a/roles/openshift_default_storage_class/meta/main.yml b/roles/openshift_default_storage_class/meta/main.yml
new file mode 100644
index 000000000..d7d57fe39
--- /dev/null
+++ b/roles/openshift_default_storage_class/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Openshift Operations
+ description: This role configures the StorageClass in OpenShift
+ company: Red Hat
+ license: Apache
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_default_storage_class/tasks/main.yml b/roles/openshift_default_storage_class/tasks/main.yml
new file mode 100644
index 000000000..408fc17c7
--- /dev/null
+++ b/roles/openshift_default_storage_class/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+# Install default storage classes in GCE & AWS
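+# The is-default-class annotation below makes this StorageClass the cluster
+# default for PVCs that do not request a specific class.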
+- name: Ensure storageclass object
+ oc_obj:
+ kind: storageclass
+ name: "{{ openshift_storageclass_name }}"
+ content:
+ path: /tmp/openshift_storageclass
+ data:
+ kind: StorageClass
+ apiVersion: storage.k8s.io/v1beta1
+ metadata:
+ name: "{{ openshift_storageclass_name }}"
+ annotations:
+ storageclass.beta.kubernetes.io/is-default-class: "true"
+ provisioner: "{{ openshift_storageclass_provisioner }}"
+ parameters:
+ type: "{{ openshift_storageclass_type }}"
+ run_once: true
diff --git a/roles/openshift_default_storage_class/vars/main.yml b/roles/openshift_default_storage_class/vars/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_default_storage_class/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
index 82db36eba..b3ecd57a6 100644
--- a/roles/openshift_etcd_facts/vars/main.yml
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -5,6 +5,7 @@ etcd_hostname: "{{ openshift.common.hostname }}"
etcd_ip: "{{ openshift.common.ip }}"
etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"
etcd_cert_prefix:
-etcd_cert_config_dir: "{{ '/etc/etcd' if not openshift.common.is_etcd_system_container | bool else '/var/lib/etcd/etcd.etcd/etc' }}"
+etcd_cert_config_dir: "/etc/etcd"
+etcd_system_container_cert_config_dir: /var/lib/etcd/etcd.etcd/etc
etcd_peer_url_scheme: https
etcd_url_scheme: https
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index d09358bee..3a866cedf 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -1,14 +1,24 @@
---
-- name: Install docker excluder
- package:
- name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
- state: "{{ r_openshift_excluder_docker_package_state }}"
- when:
- - r_openshift_excluder_enable_docker_excluder | bool
-
-- name: Install openshift excluder
- package:
- name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
- state: "{{ r_openshift_excluder_package_state }}"
- when:
- - r_openshift_excluder_enable_openshift_excluder | bool
+
+- when:
+ - not openshift.common.is_atomic | bool
+ - r_openshift_excluder_install_ran is not defined
+
+ block:
+
+ - name: Install docker excluder
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ state: "{{ r_openshift_excluder_docker_package_state }}"
+ when:
+ - r_openshift_excluder_enable_docker_excluder | bool
+
+ - name: Install openshift excluder
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ state: "{{ r_openshift_excluder_package_state }}"
+ when:
+ - r_openshift_excluder_enable_openshift_excluder | bool
+
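+ # this fact acts as a per-host guard: repeated includes of this role skip the package installs above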
+ - set_fact:
+ r_openshift_excluder_install_ran: True
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 1b9bda67e..50ed3e964 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -24,12 +24,18 @@
msg: |
openshift-ansible requires Python 3 for {{ ansible_distribution }};
For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
- when: ansible_distribution == 'Fedora' and ansible_python['version']['major'] != 3
+ when:
+ - ansible_distribution == 'Fedora'
+ - ansible_python['version']['major'] != 3
+ - r_openshift_facts_ran is not defined
- name: Validate python version
fail:
msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
- when: ansible_distribution != 'Fedora' and ansible_python['version']['major'] != 2
+ when:
+ - ansible_distribution != 'Fedora'
+ - ansible_python['version']['major'] != 2
+ - r_openshift_facts_ran is not defined
# Fail as early as possible if Atomic and old version of Docker
- block:
@@ -48,7 +54,9 @@
that:
- l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
- when: l_is_atomic | bool
+ when:
+ - l_is_atomic | bool
+ - r_openshift_facts_ran is not defined
- name: Load variables
include_vars: "{{ item }}"
@@ -59,7 +67,9 @@
- name: Ensure various deps are installed
package: name={{ item }} state=present
with_items: "{{ required_packages }}"
- when: not l_is_atomic | bool
+ when:
+ - not l_is_atomic | bool
+ - r_openshift_facts_ran is not defined
- name: Ensure various deps for running system containers are installed
package: name={{ item }} state=present
@@ -67,6 +77,7 @@
when:
- not l_is_atomic | bool
- l_any_system_container | bool
+ - r_openshift_facts_ran is not defined
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
@@ -99,3 +110,7 @@
- name: Set repoquery command
set_fact:
repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
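+# later runs of this role check r_openshift_facts_ran and skip the validations above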
+- name: Register that this already ran
+ set_fact:
+ r_openshift_facts_ran: True
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 4588ed634..60aacf715 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,8 +1,25 @@
-# pylint: disable=missing-docstring
+"""Check that required Docker images are available."""
+
from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks.mixins import DockerHostMixin
+
+NODE_IMAGE_SUFFIXES = ["haproxy-router", "docker-registry", "deployer", "pod"]
+DEPLOYMENT_IMAGE_INFO = {
+ "origin": {
+ "namespace": "openshift",
+ "name": "origin",
+ "registry_console_image": "cockpit/kubernetes",
+ },
+ "openshift-enterprise": {
+ "namespace": "openshift3",
+ "name": "ose",
+ "registry_console_image": "registry.access.redhat.com/openshift3/registry-console",
+ },
+}
-class DockerImageAvailability(OpenShiftCheck):
+
+class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Check that required Docker images are available.
This check attempts to ensure that required docker images are
@@ -12,43 +29,23 @@ class DockerImageAvailability(OpenShiftCheck):
name = "docker_image_availability"
tags = ["preflight"]
-
dependencies = ["skopeo", "python-docker-py"]
- deployment_image_info = {
- "origin": {
- "namespace": "openshift",
- "name": "origin",
- },
- "openshift-enterprise": {
- "namespace": "openshift3",
- "name": "ose",
- },
- }
-
@classmethod
def is_active(cls, task_vars):
"""Skip hosts with unsupported deployment types."""
deployment_type = get_var(task_vars, "openshift_deployment_type")
- has_valid_deployment_type = deployment_type in cls.deployment_image_info
+ has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO
return super(DockerImageAvailability, cls).is_active(task_vars) and has_valid_deployment_type
def run(self, tmp, task_vars):
msg, failed, changed = self.ensure_dependencies(task_vars)
-
- # exit early if Skopeo update fails
if failed:
- if "No package matching" in msg:
- msg = "Ensure that all required dependencies can be installed via `yum`.\n"
return {
"failed": True,
"changed": changed,
- "msg": (
- "Unable to update or install required dependency packages on this host;\n"
- "These are required in order to check Docker image availability:"
- "\n {deps}\n{msg}"
- ).format(deps=',\n '.join(self.dependencies), msg=msg),
+ "msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
}
required_images = self.required_images(task_vars)
@@ -77,51 +74,55 @@ class DockerImageAvailability(OpenShiftCheck):
return {"changed": changed}
- def required_images(self, task_vars):
- deployment_type = get_var(task_vars, "openshift_deployment_type")
- image_info = self.deployment_image_info[deployment_type]
-
- openshift_release = get_var(task_vars, "openshift_release", default="latest")
- openshift_image_tag = get_var(task_vars, "openshift_image_tag")
- is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
-
- images = set(self.required_docker_images(
- image_info["namespace"],
- image_info["name"],
- ["registry-console"] if "enterprise" in deployment_type else [], # include enterprise-only image names
- openshift_release,
- is_containerized,
- ))
-
- # append images with qualified image tags to our list of required images.
- # these are images with a (v0.0.0.0) tag, rather than a standard release
- # format tag (v0.0). We want to check this set in both containerized and
- # non-containerized installations.
- images.update(
- self.required_qualified_docker_images(
- image_info["namespace"],
- image_info["name"],
- openshift_image_tag,
- ),
- )
-
- return images
-
- @staticmethod
- def required_docker_images(namespace, name, additional_image_names, version, is_containerized):
- if is_containerized:
- return ["{}/{}:{}".format(namespace, name, version)] if name else []
-
- # include additional non-containerized images specific to the current deployment type
- return ["{}/{}:{}".format(namespace, img_name, version) for img_name in additional_image_names]
-
@staticmethod
- def required_qualified_docker_images(namespace, name, version):
- # pylint: disable=invalid-name
- return [
- "{}/{}-{}:{}".format(namespace, name, suffix, version)
- for suffix in ["haproxy-router", "docker-registry", "deployer", "pod"]
- ]
+ def required_images(task_vars):
+ """
+ Determine which images we expect to need for this host.
+ Returns: a set of required images like 'openshift/origin:v3.6'
+
+ The thorny issue of determining the image names from the variables is under consideration
+ via https://github.com/openshift/openshift-ansible/issues/4415
+
+ For now we operate as follows:
+ * For containerized components (master, node, ...) we look at the deployment type and
+ use openshift/origin or openshift3/ose as the base for those component images. The
+ version is openshift_image_tag as determined by the openshift_version role.
+ * For OpenShift-managed infrastructure (router, registry...) we use oreg_url if
+ it is defined; otherwise we again use the base that depends on the deployment type.
+ The registry host is not included in the constructed image names; it may appear in oreg_url or in the etcd image.
+ """
+ required = set()
+ deployment_type = get_var(task_vars, "openshift_deployment_type")
+ host_groups = get_var(task_vars, "group_names")
+ image_tag = get_var(task_vars, "openshift_image_tag")
+ image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]
+ if not image_info:
+ return required
+
+ # template for images that run on top of OpenShift
+ image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
+ image_url = get_var(task_vars, "oreg_url", default="") or image_url
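+ # e.g. for origin the constructed template is "openshift/origin-${component}:${version}";
+ # the placeholders are substituted per component and with openshift_image_tag below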
+ if 'nodes' in host_groups:
+ for suffix in NODE_IMAGE_SUFFIXES:
+ required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
+ # The registry-console is for some reason not prefixed with ose- like the other components.
+ # Nor is it versioned the same, so just look for latest.
+ # Also a completely different name is used for Origin.
+ required.add(image_info["registry_console_image"])
+
+ # images for containerized components
+ if get_var(task_vars, "openshift", "common", "is_containerized"):
+ components = set()
+ if 'nodes' in host_groups:
+ components.update(["node", "openvswitch"])
+ if 'masters' in host_groups: # name is "origin" or "ose"
+ components.add(image_info["name"])
+ for component in components:
+ required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag))
+ if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise
+ required.add("registry.access.redhat.com/rhel7/etcd") # and no image tag
+
+ return required
def local_images(self, images, task_vars):
"""Filter a list of images and return those available locally."""
@@ -131,7 +132,8 @@ class DockerImageAvailability(OpenShiftCheck):
]
def is_image_local(self, image, task_vars):
- result = self.module_executor("docker_image_facts", {"name": image}, task_vars)
+ """Check if image is already in local docker index."""
+ result = self.execute_module("docker_image_facts", {"name": image}, task_vars=task_vars)
if result.get("failed", False):
return False
@@ -139,6 +141,7 @@ class DockerImageAvailability(OpenShiftCheck):
@staticmethod
def known_docker_registries(task_vars):
+ """Build a list of docker registries available according to inventory vars."""
docker_facts = get_var(task_vars, "openshift", "docker")
regs = set(docker_facts["additional_registries"])
@@ -154,26 +157,21 @@ class DockerImageAvailability(OpenShiftCheck):
"""Inspect existing images using Skopeo and return all images successfully inspected."""
return [
image for image in images
- if any(self.is_available_skopeo_image(image, registry, task_vars) for registry in registries)
+ if self.is_available_skopeo_image(image, registries, task_vars)
]
- def is_available_skopeo_image(self, image, registry, task_vars):
- """Uses Skopeo to determine if required image exists in a given registry."""
-
- cmd_str = "skopeo inspect docker://{registry}/{image}".format(
- registry=registry,
- image=image,
- )
+ def is_available_skopeo_image(self, image, registries, task_vars):
+ """Use Skopeo to determine if required image exists in known registry(s)."""
- args = {"_raw_params": cmd_str}
- result = self.module_executor("command", args, task_vars)
- return not result.get("failed", False) and result.get("rc", 0) == 0
+ # if the image already includes a registry, just use that
+ if image.count("/") > 1:
+ registry, image = image.split("/", 1)
+ registries = [registry]
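+ # e.g. "foo.io/openshift3/ose-pod:v3.6" is split into registry "foo.io" and image "openshift3/ose-pod:v3.6"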
- # ensures that the skopeo and python-docker-py packages exist
- # check is skipped on atomic installations
- def ensure_dependencies(self, task_vars):
- if get_var(task_vars, "openshift", "common", "is_atomic"):
- return "", False, False
+ for registry in registries:
+ args = {"_raw_params": "skopeo inspect docker://{}/{}".format(registry, image)}
+ result = self.execute_module("command", args, task_vars=task_vars)
+ if result.get("rc", 0) == 0 and not result.get("failed"):
+ return True
- result = self.module_executor("yum", {"name": self.dependencies, "state": "latest"}, task_vars)
- return result.get("msg", ""), result.get("failed", False) or result.get("rc", 0) != 0, result.get("changed")
+ return False
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
new file mode 100644
index 000000000..2bd615457
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -0,0 +1,185 @@
+"""Check Docker storage driver and usage."""
+import json
+import re
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks.mixins import DockerHostMixin
+
+
+class DockerStorage(DockerHostMixin, OpenShiftCheck):
+ """Check Docker storage driver compatibility.
+
+ This check ensures that Docker is using a supported storage driver,
+ and that loopback is not being used (if using devicemapper).
+ Also that storage usage is not above threshold.
+ """
+
+ name = "docker_storage"
+ tags = ["pre-install", "health", "preflight"]
+
+ dependencies = ["python-docker-py"]
+ storage_drivers = ["devicemapper", "overlay2"]
+ max_thinpool_data_usage_percent = 90.0
+ max_thinpool_meta_usage_percent = 90.0
+
+ # pylint: disable=too-many-return-statements
+ # Reason: permanent stylistic exception;
+ # it is clearer to return on failures and there are just many ways to fail here.
+ def run(self, tmp, task_vars):
+ msg, failed, changed = self.ensure_dependencies(task_vars)
+ if failed:
+ return {
+ "failed": True,
+ "changed": changed,
+ "msg": "Some dependencies are required in order to query docker storage on host:\n" + msg
+ }
+
+ # attempt to get the docker info hash from the API
+ info = self.execute_module("docker_info", {}, task_vars=task_vars)
+ if info.get("failed"):
+ return {"failed": True, "changed": changed,
+ "msg": "Failed to query Docker API. Is docker running on this host?"}
+ if not info.get("info"): # this would be very strange
+ return {"failed": True, "changed": changed,
+ "msg": "Docker API query missing info:\n{}".format(json.dumps(info))}
+ info = info["info"]
+
+ # check if the storage driver we saw is valid
+ driver = info.get("Driver", "[NONE]")
+ if driver not in self.storage_drivers:
+ msg = (
+ "Detected unsupported Docker storage driver '{driver}'.\n"
+ "Supported storage drivers are: {drivers}"
+ ).format(driver=driver, drivers=', '.join(self.storage_drivers))
+ return {"failed": True, "changed": changed, "msg": msg}
+
+ # driver status info is a list of tuples; convert to dict and validate based on driver
+ driver_status = {item[0]: item[1] for item in info.get("DriverStatus", [])}
+ if driver == "devicemapper":
+ if driver_status.get("Data loop file"):
+ msg = (
+ "Use of loopback devices with the Docker devicemapper storage driver\n"
+ "(the default storage configuration) is unsupported in production.\n"
+ "Please use docker-storage-setup to configure a backing storage volume.\n"
+ "See http://red.ht/2rNperO for further information."
+ )
+ return {"failed": True, "changed": changed, "msg": msg}
+ result = self._check_dm_usage(driver_status, task_vars)
+ result['changed'] = result.get('changed', False) or changed
+ return result
+
+ # TODO(lmeyer): determine how to check usage for overlay2
+
+ return {"changed": changed}
+
+ def _check_dm_usage(self, driver_status, task_vars):
+ """
+ Backing assumptions: We expect devicemapper to be backed by an auto-expanding thin pool
+ implemented as an LV in an LVM2 VG. This is how docker-storage-setup currently configures
+ devicemapper storage. The LV is "thin" because it does not use all available storage
+ from its VG, instead expanding as needed; so to determine available space, we gather
+ current usage as the Docker API reports for the driver as well as space available for
+ expansion in the pool's VG.
+ Usage within the LV is divided into pools allocated to data and metadata, either of which
+ could run out of space first; so we check both.
+ """
+ vals = dict(
+ vg_free=self._get_vg_free(driver_status.get("Pool Name"), task_vars),
+ data_used=driver_status.get("Data Space Used"),
+ data_total=driver_status.get("Data Space Total"),
+ metadata_used=driver_status.get("Metadata Space Used"),
+ metadata_total=driver_status.get("Metadata Space Total"),
+ )
+
+ # convert all human-readable strings to bytes
+ for key, value in vals.copy().items():
+ try:
+ vals[key + "_bytes"] = self._convert_to_bytes(value)
+ except ValueError as err: # unlikely to hit this from API info, but just to be safe
+ return {
+ "failed": True,
+ "values": vals,
+ "msg": "Could not interpret {} value '{}' as bytes: {}".format(key, value, str(err))
+ }
+
+ # determine the threshold percentages which usage should not exceed
+ for name, default in [("data", self.max_thinpool_data_usage_percent),
+ ("metadata", self.max_thinpool_meta_usage_percent)]:
+ percent = get_var(task_vars, "max_thinpool_" + name + "_usage_percent", default=default)
+ try:
+ vals[name + "_threshold"] = float(percent)
+ except ValueError:
+ return {
+ "failed": True,
+ "msg": "Specified thinpool {} usage limit '{}' is not a percentage".format(name, percent)
+ }
+
+ # test whether the thresholds are exceeded
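+ # usage percent is measured against the pool's current size plus the VG space it could still grow into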
+ messages = []
+ for name in ["data", "metadata"]:
+ vals[name + "_pct_used"] = 100 * vals[name + "_used_bytes"] / (
+ vals[name + "_total_bytes"] + vals["vg_free_bytes"])
+ if vals[name + "_pct_used"] > vals[name + "_threshold"]:
+ messages.append(
+ "Docker thinpool {name} usage percentage {pct:.1f} "
+ "is higher than threshold {thresh:.1f}.".format(
+ name=name,
+ pct=vals[name + "_pct_used"],
+ thresh=vals[name + "_threshold"],
+ ))
+ vals["failed"] = True
+
+ vals["msg"] = "\n".join(messages or ["Thinpool usage is within thresholds."])
+ return vals
+
+ def _get_vg_free(self, pool, task_vars):
+ # Determine which VG to examine according to the pool name, the only indicator currently
+ # available from the Docker API driver info. We assume a name that looks like
+ # "vg--name-docker--pool"; vg and lv names with inner hyphens doubled, joined by a hyphen.
+ match = re.match(r'((?:[^-]|--)+)-(?!-)', pool) # matches up to the first single hyphen
+ if not match: # unlikely, but... be clear if we assumed wrong
+ raise OpenShiftCheckException(
+ "This host's Docker reports it is using a storage pool named '{}'.\n"
+ "However this name does not have the expected format of 'vgname-lvname'\n"
+ "so the available storage in the VG cannot be determined.".format(pool)
+ )
+ vg_name = match.groups()[0].replace("--", "-")
+ vgs_cmd = "/sbin/vgs --noheadings -o vg_free --select vg_name=" + vg_name
+ # should return free space like " 12.00g" if the VG exists; empty if it does not
+
+ ret = self.execute_module("command", {"_raw_params": vgs_cmd}, task_vars=task_vars)
+ if ret.get("failed") or ret.get("rc", 0) != 0:
+ raise OpenShiftCheckException(
+ "Is LVM installed? Failed to run /sbin/vgs "
+ "to determine docker storage usage:\n" + ret.get("msg", "")
+ )
+ size = ret.get("stdout", "").strip()
+ if not size:
+ raise OpenShiftCheckException(
+ "This host's Docker reports it is using a storage pool named '{pool}'.\n"
+ "which we expect to come from local VG '{vg}'.\n"
+ "However, /sbin/vgs did not find this VG. Is Docker for this host"
+ "running and using the storage on the host?".format(pool=pool, vg=vg_name)
+ )
+ return size
+
+ @staticmethod
+ def _convert_to_bytes(string):
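+ # convert a human-readable size like "42.42 MB" to bytes (42.42 * 1024**2);
+ # only the first letter of the unit matters, case-insensitively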
+ units = dict(
+ b=1,
+ k=1024,
+ m=1024**2,
+ g=1024**3,
+ t=1024**4,
+ p=1024**5,
+ )
+ string = string or ""
+ match = re.match(r'(\d+(?:\.\d+)?)\s*(\w)?', string) # float followed by optional unit
+ if not match:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ number, unit = match.groups()
+ multiplier = 1 if not unit else units.get(unit.lower())
+ if not multiplier:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ return float(number) * multiplier
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 20d160eaf..2cb2e21aa 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -1,4 +1,3 @@
-# pylint: disable=missing-docstring,too-few-public-methods
"""
Mixin classes meant to be used with subclasses of OpenShiftCheck.
"""
@@ -8,8 +7,52 @@ from openshift_checks import get_var
class NotContainerizedMixin(object):
"""Mixin for checks that are only active when not in containerized mode."""
+ # permanent # pylint: disable=too-few-public-methods
+ # Reason: The mixin is not intended to stand on its own as a class.
@classmethod
def is_active(cls, task_vars):
+ """Only run on non-containerized hosts."""
is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized
+
+
+class DockerHostMixin(object):
+ """Mixin for checks that are only active on hosts that require Docker."""
+
+ dependencies = []
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Only run on hosts that depend on Docker."""
+ is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+ is_node = "nodes" in get_var(task_vars, "group_names", default=[])
+ return super(DockerHostMixin, cls).is_active(task_vars) and (is_containerized or is_node)
+
+ def ensure_dependencies(self, task_vars):
+ """
+ Ensure that docker-related packages exist, but not on atomic hosts
+ (which would not be able to install but should already have them).
+ Returns: msg, failed, changed
+ """
+ if get_var(task_vars, "openshift", "common", "is_atomic"):
+ return "", False, False
+
+ # NOTE: we would use the "package" module but it's actually an action plugin
+ # and it's not clear how to invoke one of those. This is about the same anyway:
+ result = self.execute_module(
+ get_var(task_vars, "ansible_pkg_mgr", default="yum"),
+ {"name": self.dependencies, "state": "present"},
+ task_vars=task_vars,
+ )
+ msg = result.get("msg", "")
+ if result.get("failed"):
+ if "No package matching" in msg:
+ msg = "Ensure that all required dependencies can be installed via `yum`.\n"
+ msg = (
+ "Unable to install required packages on this host:\n"
+ " {deps}\n{msg}"
+ ).format(deps=',\n '.join(self.dependencies), msg=msg)
+ failed = result.get("failed", False) or result.get("rc", 0) != 0
+ changed = result.get("changed", False)
+ return msg, failed, changed
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index 1e45ae3af..2dd045f1f 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -43,7 +43,7 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
},
],
}
- return self.execute_module("rpm_version", args, task_vars)
+ return self.execute_module("rpm_version", args, task_vars=task_vars)
def get_required_ovs_version(self, task_vars):
"""Return the correct Open vSwitch version for the current OpenShift version"""
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index a7eb720fd..e87567fe6 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -25,7 +25,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
- return self.execute_module("check_yum_update", args, tmp, task_vars)
+ return self.execute_module("check_yum_update", args, tmp=tmp, task_vars=task_vars)
@staticmethod
def master_packages(rpm_prefix):
@@ -36,7 +36,6 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
"bash-completion",
"cockpit-bridge",
"cockpit-docker",
- "cockpit-kubernetes",
"cockpit-shell",
"cockpit-ws",
"etcd",
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index fd0c0a755..f432380c6 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
def run(self, tmp, task_vars):
args = {"packages": []}
- return self.execute_module("check_yum_update", args, tmp, task_vars)
+ return self.execute_module("check_yum_update", args, tmp=tmp, task_vars=task_vars)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 2e737818b..6a76bb93d 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -71,7 +71,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
],
}
- return self.execute_module("aos_version", args, tmp, task_vars)
+ return self.execute_module("aos_version", args, tmp=tmp, task_vars=task_vars)
def get_required_ovs_version(self, task_vars):
"""Return the correct Open vSwitch version for the current OpenShift version.
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 0379cafb5..0a7c0f8d3 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,19 +3,25 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
-@pytest.mark.parametrize('deployment_type,is_active', [
- ("origin", True),
- ("openshift-enterprise", True),
- ("enterprise", False),
- ("online", False),
- ("invalid", False),
- ("", False),
+@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
+ ("origin", True, [], True),
+ ("openshift-enterprise", True, [], True),
+ ("enterprise", True, [], False),
+ ("online", True, [], False),
+ ("invalid", True, [], False),
+ ("", True, [], False),
+ ("origin", False, [], False),
+ ("openshift-enterprise", False, [], False),
+ ("origin", False, ["nodes", "masters"], True),
+ ("openshift-enterprise", False, ["etcd"], False),
])
-def test_is_active(deployment_type, is_active):
+def test_is_active(deployment_type, is_containerized, group_names, expect_active):
task_vars = dict(
+ openshift=dict(common=dict(is_containerized=is_containerized)),
openshift_deployment_type=deployment_type,
+ group_names=group_names,
)
- assert DockerImageAvailability.is_active(task_vars=task_vars) == is_active
+ assert DockerImageAvailability.is_active(task_vars=task_vars) == expect_active
@pytest.mark.parametrize("is_containerized,is_atomic", [
@@ -25,15 +31,15 @@ def test_is_active(deployment_type, is_active):
(False, True),
])
def test_all_images_available_locally(is_containerized, is_atomic):
- def execute_module(module_name, args, task_vars):
+ def execute_module(module_name, module_args, task_vars):
if module_name == "yum":
return {"changed": True}
assert module_name == "docker_image_facts"
- assert 'name' in args
- assert args['name']
+ assert 'name' in module_args
+ assert module_args['name']
return {
- 'images': [args['name']],
+ 'images': [module_args['name']],
}
result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
@@ -46,8 +52,8 @@ def test_all_images_available_locally(is_containerized, is_atomic):
docker=dict(additional_registries=["docker.io"]),
),
openshift_deployment_type='origin',
- openshift_release='v3.4',
openshift_image_tag='3.4',
+ group_names=['nodes', 'masters'],
))
assert not result.get('failed', False)
@@ -58,7 +64,7 @@ def test_all_images_available_locally(is_containerized, is_atomic):
True,
])
def test_all_images_available_remotely(available_locally):
- def execute_module(module_name, args, task_vars):
+ def execute_module(module_name, module_args, task_vars):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
return {'changed': False}
@@ -73,8 +79,8 @@ def test_all_images_available_remotely(available_locally):
docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
),
openshift_deployment_type='origin',
- openshift_release='3.4',
openshift_image_tag='v3.4',
+ group_names=['nodes', 'masters'],
))
assert not result.get('failed', False)
@@ -102,8 +108,8 @@ def test_all_images_unavailable():
docker=dict(additional_registries=["docker.io"]),
),
openshift_deployment_type="openshift-enterprise",
- openshift_release=None,
- openshift_image_tag='latest'
+ openshift_image_tag='latest',
+ group_names=['nodes', 'masters'],
))
assert actual['failed']
@@ -141,8 +147,8 @@ def test_skopeo_update_failure(message, extra_words):
docker=dict(additional_registries=["unknown.io"]),
),
openshift_deployment_type="openshift-enterprise",
- openshift_release='',
openshift_image_tag='',
+ group_names=['nodes', 'masters'],
))
assert actual["failed"]
@@ -171,8 +177,85 @@ def test_registry_availability(deployment_type, registries):
docker=dict(additional_registries=registries),
),
openshift_deployment_type=deployment_type,
- openshift_release='',
openshift_image_tag='',
+ group_names=['nodes', 'masters'],
))
assert not actual.get("failed", False)
+
+
+@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
+ ( # standard set of stuff required on nodes
+ "origin", False, ['nodes'], None,
+ set([
+ 'openshift/origin-pod:vtest',
+ 'openshift/origin-deployer:vtest',
+ 'openshift/origin-docker-registry:vtest',
+ 'openshift/origin-haproxy-router:vtest',
+ 'cockpit/kubernetes', # origin version of registry-console
+ ])
+ ),
+ ( # set a different URL for images
+ "origin", False, ['nodes'], 'foo.io/openshift/origin-${component}:${version}',
+ set([
+ 'foo.io/openshift/origin-pod:vtest',
+ 'foo.io/openshift/origin-deployer:vtest',
+ 'foo.io/openshift/origin-docker-registry:vtest',
+ 'foo.io/openshift/origin-haproxy-router:vtest',
+ 'cockpit/kubernetes', # AFAICS this is not built from the URL
+ ])
+ ),
+ (
+ "origin", True, ['nodes', 'masters', 'etcd'], None,
+ set([
+ # images running on top of openshift
+ 'openshift/origin-pod:vtest',
+ 'openshift/origin-deployer:vtest',
+ 'openshift/origin-docker-registry:vtest',
+ 'openshift/origin-haproxy-router:vtest',
+ 'cockpit/kubernetes',
+ # containerized component images
+ 'openshift/origin:vtest',
+ 'openshift/node:vtest',
+ 'openshift/openvswitch:vtest',
+ 'registry.access.redhat.com/rhel7/etcd',
+ ])
+ ),
+ ( # enterprise images
+ "openshift-enterprise", True, ['nodes'], 'foo.io/openshift3/ose-${component}:f13ac45',
+ set([
+ 'foo.io/openshift3/ose-pod:f13ac45',
+ 'foo.io/openshift3/ose-deployer:f13ac45',
+ 'foo.io/openshift3/ose-docker-registry:f13ac45',
+ 'foo.io/openshift3/ose-haproxy-router:f13ac45',
+ # registry-console is not constructed/versioned the same as the others.
+ 'registry.access.redhat.com/openshift3/registry-console',
+ # containerized images aren't built from oreg_url
+ 'openshift3/node:vtest',
+ 'openshift3/openvswitch:vtest',
+ ])
+ ),
+ (
+ "openshift-enterprise", True, ['etcd', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
+ set([
+ 'registry.access.redhat.com/rhel7/etcd',
+ # lb does not yet come in a containerized version
+ ])
+ ),
+
+])
+def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected):
+ task_vars = dict(
+ openshift=dict(
+ common=dict(
+ is_containerized=is_containerized,
+ is_atomic=False,
+ ),
+ ),
+ openshift_deployment_type=deployment_type,
+ group_names=groups,
+ oreg_url=oreg_url,
+ openshift_image_tag='vtest',
+ )
+
+ assert expected == DockerImageAvailability("DUMMY").required_images(task_vars)
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
new file mode 100644
index 000000000..876614b1d
--- /dev/null
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -0,0 +1,224 @@
+import pytest
+
+from openshift_checks import OpenShiftCheckException
+from openshift_checks.docker_storage import DockerStorage
+
+
+def dummy_check(execute_module=None):
+ def dummy_exec(self, status, task_vars):
+ raise Exception("dummy executor called")
+ return DockerStorage(execute_module=execute_module or dummy_exec)
+
+
+@pytest.mark.parametrize('is_containerized, group_names, is_active', [
+ (False, ["masters", "etcd"], False),
+ (False, ["masters", "nodes"], True),
+ (True, ["etcd"], True),
+])
+def test_is_active(is_containerized, group_names, is_active):
+ task_vars = dict(
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ group_names=group_names,
+ )
+ assert DockerStorage.is_active(task_vars=task_vars) == is_active
+
+
+non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}}
+
+
+@pytest.mark.parametrize('docker_info, failed, expect_msg', [
+ (
+ dict(failed=True, msg="Error connecting: Error while fetching server API version"),
+ True,
+ ["Is docker running on this host?"],
+ ),
+ (
+ dict(msg="I have no info"),
+ True,
+ ["missing info"],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Pool Name", "docker-docker--pool")],
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Data loop file", "true")],
+ }),
+ True,
+ ["loopback devices with the Docker devicemapper storage driver"],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay2",
+ "DriverStatus": []
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay",
+ }),
+ True,
+ ["unsupported Docker storage driver"],
+ ),
+ (
+ dict(info={
+ "Driver": "unsupported",
+ }),
+ True,
+ ["unsupported Docker storage driver"],
+ ),
+])
+def test_check_storage_driver(docker_info, failed, expect_msg):
+ def execute_module(module_name, module_args, tmp=None, task_vars=None):
+ if module_name == "yum":
+ return {}
+ if module_name != "docker_info":
+ raise ValueError("not expecting module " + module_name)
+ return docker_info
+
+ check = dummy_check(execute_module=execute_module)
+ check._check_dm_usage = lambda status, task_vars: dict() # stub out for this test
+ result = check.run(tmp=None, task_vars=non_atomic_task_vars)
+
+ if failed:
+ assert result["failed"]
+ else:
+ assert not result.get("failed", False)
+
+ for word in expect_msg:
+ assert word in result["msg"]
+
+
+enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "19.92 MB",
+ "Data Space Total": "8.535 GB",
+ "Metadata Space Used": "40.96 kB",
+ "Metadata Space Total": "25.17 MB",
+}
+
+not_enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "10 GB",
+ "Data Space Total": "10 GB",
+ "Metadata Space Used": "42 kB",
+ "Metadata Space Total": "43 kB",
+}
+
+
+@pytest.mark.parametrize('task_vars, driver_status, vg_free, success, expect_msg', [
+ (
+ {"max_thinpool_data_usage_percent": "not a float"},
+ enough_space,
+ "12g",
+ False,
+ ["is not a percentage"],
+ ),
+ (
+ {},
+ {}, # empty values from driver status
+ "bogus", # also does not parse as bytes
+ False,
+ ["Could not interpret", "as bytes"],
+ ),
+ (
+ {},
+ enough_space,
+ "12.00g",
+ True,
+ [],
+ ),
+ (
+ {},
+ not_enough_space,
+ "0.00",
+ False,
+ ["data usage", "metadata usage", "higher than threshold"],
+ ),
+])
+def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
+ check = dummy_check()
+ check._get_vg_free = lambda pool, task_vars: vg_free
+ result = check._check_dm_usage(driver_status, task_vars)
+ result_success = not result.get("failed")
+
+ assert result_success is success
+ for msg in expect_msg:
+ assert msg in result["msg"]
+
+
+@pytest.mark.parametrize('pool, command_returns, raises, returns', [
+ (
+ "foo-bar",
+ { # vgs missing
+ "msg": "[Errno 2] No such file or directory",
+ "failed": True,
+ "cmd": "/sbin/vgs",
+ "rc": 2,
+ },
+ "Failed to run /sbin/vgs",
+ None,
+ ),
+ (
+ "foo", # no hyphen in name - should not happen
+ {},
+ "name does not have the expected format",
+ None,
+ ),
+ (
+ "foo-bar",
+ dict(stdout=" 4.00g\n"),
+ None,
+ "4.00g",
+ ),
+ (
+ "foo-bar",
+ dict(stdout="\n"), # no matching VG
+ "vgs did not find this VG",
+ None,
+ )
+])
+def test_vg_free(pool, command_returns, raises, returns):
+ def execute_module(module_name, module_args, tmp=None, task_vars=None):
+ if module_name != "command":
+ raise ValueError("not expecting module " + module_name)
+ return command_returns
+
+ check = dummy_check(execute_module=execute_module)
+ if raises:
+ with pytest.raises(OpenShiftCheckException) as err:
+ check._get_vg_free(pool, {})
+ assert raises in str(err.value)
+ else:
+ ret = check._get_vg_free(pool, {})
+ assert ret == returns
+
+
+@pytest.mark.parametrize('string, expect_bytes', [
+ ("12", 12.0),
+ ("12 k", 12.0 * 1024),
+ ("42.42 MB", 42.42 * 1024**2),
+ ("12g", 12.0 * 1024**3),
+])
+def test_convert_to_bytes(string, expect_bytes):
+ got = DockerStorage._convert_to_bytes(string)
+ assert got == expect_bytes
+
+
+@pytest.mark.parametrize('string', [
+ "bork",
+ "42 Qs",
+])
+def test_convert_to_bytes_error(string):
+ with pytest.raises(ValueError) as err:
+ DockerStorage._convert_to_bytes(string)
+ assert "Cannot convert" in str(err.value)
+ assert string in str(err.value)
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 0c60ef6fd..dd0f22d4b 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -55,6 +55,9 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_fluentd_use_journal`: NOTE: Fluentd will attempt to detect whether or not Docker is using the journald log driver when using the default of empty.
- `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false.
- `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all'].
+- `openshift_logging_fluentd_buffer_queue_limit`: Buffer queue limit for Fluentd. Defaults to 1024.
+- `openshift_logging_fluentd_buffer_size_limit`: Buffer chunk limit for Fluentd. Defaults to 1m.
+
- `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'.
- `openshift_logging_es_port`: The port for the ES service Fluentd should send its logs to. Defaults to '9200'.
@@ -155,3 +158,5 @@ Elasticsearch OPS too, if using an OPS cluster:
- `openshift_logging_mux_namespaces`: Default `[]` - additional namespaces to
create for _external_ mux clients to associate with their logs - users will
need to set this
+- `openshift_logging_mux_buffer_queue_limit`: Default `1024` - Buffer queue limit for Mux.
+- `openshift_logging_mux_buffer_size_limit`: Default `1m` - Buffer chunk limit for Mux.
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 573cbdd09..66d880d23 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -76,6 +76,8 @@ openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal
openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
openshift_logging_fluentd_hosts: ['--all']
+openshift_logging_fluentd_buffer_queue_limit: 1024
+openshift_logging_fluentd_buffer_size_limit: 1m
openshift_logging_es_host: logging-es
openshift_logging_es_port: 9200
@@ -87,7 +89,7 @@ openshift_logging_es_cpu_limit: null
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: null
+openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default(null) }}"
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
@@ -126,7 +128,7 @@ openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: null
openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
-openshift_logging_es_ops_pv_selector: None
+openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default(null) }}"
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 7e88a7498..f1d15b76d 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -217,7 +217,7 @@
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
annotations:
- volume.alpha.kubernetes.io/storage-class: "dynamic"
+ volume.beta.kubernetes.io/storage-class: "dynamic"
when:
- openshift_logging_elasticsearch_storage_type == "pvc"
- openshift_logging_elasticsearch_pvc_dynamic
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index e185938e3..a5695ee26 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -93,6 +93,14 @@ spec:
value: "{{ openshift_logging_fluentd_journal_source | default('') }}"
- name: "JOURNAL_READ_FROM_HEAD"
value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}"
+ - name: "BUFFER_QUEUE_LIMIT"
+ value: "{{ openshift_logging_fluentd_buffer_queue_limit }}"
+ - name: "BUFFER_SIZE_LIMIT"
+ value: "{{ openshift_logging_fluentd_buffer_size_limit }}"
+ - name: "FLUENTD_CPU_LIMIT"
+ value: "{{ openshift_logging_fluentd_cpu_limit }}"
+ - name: "FLUENTD_MEMORY_LIMIT"
+ value: "{{ openshift_logging_fluentd_memory_limit }}"
volumes:
- name: runlogjournal
hostPath:
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index 10fa4372c..77e47d38c 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -10,7 +10,9 @@ openshift_logging_mux_namespace: logging
### Common settings
openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
openshift_logging_mux_cpu_limit: 500m
-openshift_logging_mux_memory_limit: 1Gi
+openshift_logging_mux_memory_limit: 2Gi
+openshift_logging_mux_buffer_queue_limit: 1024
+openshift_logging_mux_buffer_size_limit: 1m
openshift_logging_mux_replicas: 1
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 502cd3347..243698c6a 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -103,6 +103,14 @@ spec:
value: "true"
- name: MUX_ALLOW_EXTERNAL
value: "{{ openshift_logging_mux_allow_external | default('false') }}"
+ - name: "BUFFER_QUEUE_LIMIT"
+ value: "{{ openshift_logging_mux_buffer_queue_limit }}"
+ - name: "BUFFER_SIZE_LIMIT"
+ value: "{{ openshift_logging_mux_buffer_size_limit }}"
+ - name: "MUX_CPU_LIMIT"
+ value: "{{ openshift_logging_mux_cpu_limit }}"
+ - name: "MUX_MEMORY_LIMIT"
+ value: "{{ openshift_logging_mux_memory_limit }}"
volumes:
- name: config
configMap:
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index 84503217b..1f10de4a2 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -68,6 +68,9 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_metrics_resolution`: How often metrics should be gathered.
+- `openshift_metrics_install_hawkular_agent`: Install the Hawkular OpenShift Agent (HOSA). HOSA can be used
+ to collect custom metrics from your pods. This component is currently in tech-preview and is not installed by default.
+
## Additional variables to control resource limits
Each metrics component (hawkular, cassandra, heapster) can specify a cpu and memory limits and requests by setting
the corresponding role variable:
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index 1d3db8a1a..ba50566e9 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -16,6 +16,7 @@ openshift_metrics_hawkular_nodeselector: ""
openshift_metrics_cassandra_replicas: 1
openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}"
openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}"
+openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default(null) }}"
openshift_metrics_cassandra_limits_memory: 2G
openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
@@ -30,6 +31,14 @@ openshift_metrics_heapster_requests_memory: 0.9375G
openshift_metrics_heapster_requests_cpu: null
openshift_metrics_heapster_nodeselector: ""
+openshift_metrics_install_hawkular_agent: False
+openshift_metrics_hawkular_agent_limits_memory: null
+openshift_metrics_hawkular_agent_limits_cpu: null
+openshift_metrics_hawkular_agent_requests_memory: null
+openshift_metrics_hawkular_agent_requests_cpu: null
+openshift_metrics_hawkular_agent_nodeselector: ""
+openshift_metrics_hawkular_agent_namespace: "default"
+
openshift_metrics_hawkular_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}"
openshift_metrics_duration: 7
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index fb4fe2f03..7b81b3c10 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -73,6 +73,8 @@
{{ hawkular_secrets['hawkular-metrics.key'] }}
tls.truststore.crt: >
{{ hawkular_secrets['hawkular-cassandra.crt'] }}
+ ca.crt: >
+ {{ hawkular_secrets['ca.crt'] }}
when: name not in metrics_secrets.stdout_lines
changed_when: no
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 3b4e8560f..62b7f52cb 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -23,7 +23,7 @@
changed_when: false
- set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
- when: not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''
+ when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
- name: generate hawkular-cassandra persistent volume claims
template:
@@ -35,6 +35,7 @@
metrics-infra: hawkular-cassandra
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when:
- openshift_metrics_cassandra_storage_type != 'emptydir'
@@ -50,9 +51,10 @@
labels:
metrics-infra: hawkular-cassandra
annotations:
- volume.alpha.kubernetes.io/storage-class: dynamic
+ volume.beta.kubernetes.io/storage-class: dynamic
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when: openshift_metrics_cassandra_storage_type == 'dynamic'
changed_when: false
diff --git a/roles/openshift_metrics/tasks/install_hosa.yaml b/roles/openshift_metrics/tasks/install_hosa.yaml
new file mode 100644
index 000000000..cc533a68b
--- /dev/null
+++ b/roles/openshift_metrics/tasks/install_hosa.yaml
@@ -0,0 +1,44 @@
+---
+- name: Generate Hawkular Agent (HOSA) Cluster Role
+ template:
+ src: hawkular_openshift_agent_role.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-role.yaml"
+ changed_when: no
+
+- name: Generate Hawkular Agent (HOSA) Service Account
+ template:
+ src: hawkular_openshift_agent_sa.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-sa.yaml"
+ changed_when: no
+
+- name: Generate Hawkular Agent (HOSA) Daemon Set
+ template:
+ src: hawkular_openshift_agent_ds.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-ds.yaml"
+ vars:
+ node_selector: "{{openshift_metrics_hawkular_agent_nodeselector | default('') }}"
+ changed_when: no
+
+- name: Generate the Hawkular Agent (HOSA) Configmap
+ template:
+ src: hawkular_openshift_agent_cm.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-cm.yaml"
+ changed_when: no
+
+- name: Generate role binding for the hawkular-openshift-agent service account
+ template:
+ src: rolebinding.j2
+ dest: "{{ mktemp.stdout }}/templates/metrics-hawkular-agent-rolebinding.yaml"
+ vars:
+ cluster: True
+ obj_name: hawkular-openshift-agent-rb
+ labels:
+ metrics-infra: hawkular-agent
+ roleRef:
+ kind: ClusterRole
+ name: hawkular-openshift-agent
+ subjects:
+ - kind: ServiceAccount
+ name: hawkular-openshift-agent
+ namespace: "{{openshift_metrics_hawkular_agent_namespace}}"
+ changed_when: no
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 74eb56713..fdf4ae57f 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -16,11 +16,19 @@
include: install_heapster.yaml
when: openshift_metrics_heapster_standalone | bool
-- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+- name: Install Hawkular OpenShift Agent (HOSA)
+ include: install_hosa.yaml
+ when: openshift_metrics_install_hawkular_agent | default(false) | bool
+
+- find:
+ paths: "{{ mktemp.stdout }}/templates"
+ patterns: "^(?!metrics-hawkular-openshift-agent).*.yaml"
+ use_regex: true
register: object_def_files
changed_when: no
-- slurp: src={{item.path}}
+- slurp:
+ src: "{{item.path}}"
register: object_defs
with_items: "{{object_def_files.files}}"
changed_when: no
@@ -34,6 +42,31 @@
file_content: "{{ item.content | b64decode | from_yaml }}"
with_items: "{{ object_defs.results }}"
+- find:
+ paths: "{{ mktemp.stdout }}/templates"
+ patterns: "^metrics-hawkular-openshift-agent.*.yaml"
+ use_regex: true
+ register: hawkular_agent_object_def_files
+ when: openshift_metrics_install_hawkular_agent | bool
+ changed_when: no
+
+- slurp:
+ src: "{{item.path}}"
+ register: hawkular_agent_object_defs
+ with_items: "{{ hawkular_agent_object_def_files.files }}"
+ when: openshift_metrics_install_hawkular_agent | bool
+ changed_when: no
+
+- name: Create Hawkular Agent objects
+ include: oc_apply.yaml
+ vars:
+ kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ namespace: "{{ openshift_metrics_hawkular_agent_namespace }}"
+ file_name: "{{ item.source }}"
+ file_content: "{{ item.content | b64decode | from_yaml }}"
+ with_items: "{{ hawkular_agent_object_defs.results }}"
+ when: openshift_metrics_install_hawkular_agent | bool
+
- include: update_master_config.yaml
- command: >
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 5d8506a73..0b5f23c24 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -44,6 +44,9 @@
- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}"
+- include: uninstall_hosa.yaml
+ when: not openshift_metrics_install_hawkular_agent | bool
+
- name: Delete temp directory
local_action: file path=local_tmp.stdout state=absent
tags: metrics_cleanup
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index dd67703b4..1e1af40e8 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -14,7 +14,7 @@
command: >
{{ openshift.common.client_binary }} --config={{ kubeconfig }}
apply -f {{ file_name }}
- -n {{ openshift_metrics_project }}
+ -n {{namespace}}
register: generation_apply
failed_when: "'error' in generation_apply.stderr"
changed_when: no
diff --git a/roles/openshift_metrics/tasks/uninstall_hosa.yaml b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
new file mode 100644
index 000000000..42ed02460
--- /dev/null
+++ b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
@@ -0,0 +1,15 @@
+---
+- name: remove Hawkular Agent (HOSA) components
+ command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete --ignore-not-found --selector=metrics-infra=agent
+ all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
+ register: delete_metrics
+ changed_when: delete_metrics.stdout != 'No resources found'
+
+- name: remove rolebindings
+ command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete --ignore-not-found
+ clusterrolebinding/hawkular-openshift-agent-rb
+ changed_when: delete_metrics.stdout != 'No resources found'
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2
new file mode 100644
index 000000000..bf472c066
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2
@@ -0,0 +1,54 @@
+id: hawkular-openshift-agent
+kind: ConfigMap
+apiVersion: v1
+name: Hawkular OpenShift Agent Configuration
+metadata:
+ name: hawkular-openshift-agent-configuration
+ labels:
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
+data:
+ config.yaml: |
+ kubernetes:
+ tenant: ${POD:namespace_name}
+ hawkular_server:
+ url: https://hawkular-metrics.openshift-infra.svc.cluster.local
+ credentials:
+ username: secret:openshift-infra/hawkular-metrics-account/hawkular-metrics.username
+ password: secret:openshift-infra/hawkular-metrics-account/hawkular-metrics.password
+ ca_cert_file: secret:openshift-infra/hawkular-metrics-certs/ca.crt
+ emitter:
+ status_enabled: false
+ collector:
+ minimum_collection_interval: 10s
+ default_collection_interval: 30s
+ metric_id_prefix: pod/${POD:uid}/custom/
+ tags:
+ metric_name: ${METRIC:name}
+ description: ${METRIC:description}
+ units: ${METRIC:units}
+ namespace_id: ${POD:namespace_uid}
+ namespace_name: ${POD:namespace_name}
+ node_name: ${POD:node_name}
+ pod_id: ${POD:uid}
+ pod_ip: ${POD:ip}
+ pod_name: ${POD:name}
+ pod_namespace: ${POD:namespace_name}
+ hostname: ${POD:hostname}
+ host_ip: ${POD:host_ip}
+ labels: ${POD:labels}
+ type: pod
+ collector: hawkular_openshift_agent
+ custom_metric: true
+ hawkular-openshift-agent: |
+ endpoints:
+ - type: prometheus
+ protocol: "http"
+ port: 8080
+ path: /metrics
+ collection_interval: 30s
+ metrics:
+ - name: hawkular_openshift_agent_metric_data_points_collected_total
+ - name: hawkular_openshift_agent_monitored_endpoints
+ - name: hawkular_openshift_agent_monitored_pods
+ - name: hawkular_openshift_agent_monitored_metrics
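
The hawkular-openshift-agent data entry above is the agent's own endpoint definition and appears to follow the same endpoint schema an application would use to expose metrics to HOSA. A hedged sketch of a per-application ConfigMap using that schema (the name, port, and the exact pod-side discovery mechanism are assumptions based on HOSA conventions, not part of this patch):

kind: ConfigMap
apiVersion: v1
metadata:
  name: my-app-hawkular-config   # illustrative name
data:
  hawkular-openshift-agent: |
    endpoints:
    - type: prometheus
      protocol: "http"
      port: 9090                 # illustrative application port
      path: /metrics
      collection_interval: 60s
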
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
new file mode 100644
index 000000000..d65eaf9ae
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
@@ -0,0 +1,91 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ name: hawkular-openshift-agent
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
+spec:
+ selector:
+ matchLabels:
+ name: hawkular-openshift-agent
+ template:
+ metadata:
+ labels:
+ name: hawkular-openshift-agent
+ metrics-infra: agent
+ spec:
+ serviceAccount: hawkular-openshift-agent
+{% if node_selector is iterable and node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - image: {{openshift_metrics_image_prefix}}metrics-hawkular-openshift-agent:{{openshift_metrics_image_version}}
+ imagePullPolicy: Always
+ name: hawkular-openshift-agent
+{% if ((openshift_metrics_hawkular_agent_limits_cpu is defined and openshift_metrics_hawkular_agent_limits_cpu is not none)
+ or (openshift_metrics_hawkular_agent_limits_memory is defined and openshift_metrics_hawkular_agent_limits_memory is not none)
+ or (openshift_metrics_hawkular_agent_requests_cpu is defined and openshift_metrics_hawkular_agent_requests_cpu is not none)
+ or (openshift_metrics_hawkular_agent_requests_memory is defined and openshift_metrics_hawkular_agent_requests_memory is not none))
+%}
+ resources:
+{% if (openshift_metrics_hawkular_agent_limits_cpu is not none
+ or openshift_metrics_hawkular_agent_limits_memory is not none)
+%}
+ limits:
+{% if openshift_metrics_hawkular_agent_limits_cpu is not none %}
+ cpu: "{{openshift_metrics_hawkular_agent_limits_cpu}}"
+{% endif %}
+{% if openshift_metrics_hawkular_agent_limits_memory is not none %}
+ memory: "{{openshift_metrics_hawkular_agent_limits_memory}}"
+{% endif %}
+{% endif %}
+{% if (openshift_metrics_hawkular_agent_requests_cpu is not none
+ or openshift_metrics_hawkular_agent_requests_memory is not none)
+%}
+ requests:
+{% if openshift_metrics_hawkular_agent_requests_cpu is not none %}
+ cpu: "{{openshift_metrics_hawkular_agent_requests_cpu}}"
+{% endif %}
+{% if openshift_metrics_hawkular_agent_requests_memory is not none %}
+ memory: "{{openshift_metrics_hawkular_agent_requests_memory}}"
+{% endif %}
+{% endif %}
+{% endif %}
+
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /health
+ port: 8080
+ initialDelaySeconds: 30
+ periodSeconds: 30
+ command:
+ - "hawkular-openshift-agent"
+ - "-config"
+ - "/hawkular-openshift-agent-configuration/config.yaml"
+ - "-v"
+ - "3"
+ env:
+ - name: K8S_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: hawkular-openshift-agent-configuration
+ mountPath: "/hawkular-openshift-agent-configuration"
+ volumes:
+ - name: hawkular-openshift-agent-configuration
+ configMap:
+ name: hawkular-openshift-agent-configuration
+ - name: hawkular-openshift-agent
+ configMap:
+ name: hawkular-openshift-agent-configuration
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2
new file mode 100644
index 000000000..24b8cd801
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ metrics-infra: agent
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - namespaces
+ - nodes
+ - pods
+ - projects
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2
new file mode 100644
index 000000000..ec604d73c
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
index c2e56ba21..0b801b33f 100644
--- a/roles/openshift_metrics/templates/pvc.j2
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -18,6 +18,13 @@ metadata:
{% endfor %}
{% endif %}
spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
accessModes:
{% for mode in access_modes %}
- {{ mode }}
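
For clarity, a sketch of what the new selector block renders to when openshift_metrics_cassandra_pv_selector is a mapping such as {"metrics-storage": "cassandra"} (labels, access mode, and size below are illustrative):

# rendered PVC spec fragment (sketch)
spec:
  selector:
    matchLabels:
      metrics-storage: cassandra
  accessModes:
  - ReadWriteOnce   # taken from openshift_metrics_cassandra_pvc_access
  resources:
    requests:
      storage: 10Gi
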
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 4dcf1eef8..a6bd12d4e 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,6 +1,8 @@
---
- name: restart openvswitch
- systemd: name=openvswitch state=restarted
+ systemd:
+ name: openvswitch
+ state: restarted
when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
notify:
- restart openvswitch pause
@@ -10,8 +12,13 @@
when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
- name: restart node
- systemd: name={{ openshift.common.service_type }}-node state=restarted
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
- name: reload sysctl.conf
command: /sbin/sysctl -p
+
+- name: reload systemd units
+ command: systemctl daemon-reload
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index f58c803c4..e3ce5df3d 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -8,6 +8,9 @@
src: openshift.docker.node.dep.service
register: install_node_dep_result
when: openshift.common.is_containerized | bool
+ notify:
+ - reload systemd units
+ - restart node
- block:
- name: Pre-pull node image
@@ -21,6 +24,9 @@
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
src: openshift.docker.node.service
register: install_node_result
+ notify:
+ - reload systemd units
+ - restart node
when:
- openshift.common.is_containerized | bool
- not openshift.common.is_node_system_container | bool
@@ -31,6 +37,9 @@
src: "{{ openshift.common.service_type }}-node.service.j2"
register: install_node_result
when: not openshift.common.is_containerized | bool
+ notify:
+ - reload systemd units
+ - restart node
- name: Create the openvswitch service env file
template:
@@ -39,6 +48,7 @@
when: openshift.common.is_containerized | bool
register: install_ovs_sysconfig
notify:
+ - reload systemd units
- restart openvswitch
- name: Install Node system container
@@ -67,6 +77,7 @@
when: openshift.common.use_openshift_sdn | default(true) | bool
register: install_oom_fix_result
notify:
+ - reload systemd units
- restart openvswitch
- block:
@@ -81,6 +92,7 @@
dest: "/etc/systemd/system/openvswitch.service"
src: openvswitch.docker.service
notify:
+ - reload systemd units
- restart openvswitch
when:
- openshift.common.is_containerized | bool
@@ -119,8 +131,3 @@
when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
notify:
- restart node
-
-- name: Reload systemd units
- command: systemctl daemon-reload
- notify:
- - restart node
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
index f397cbbf1..8bae9aaac 100644
--- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -1,3 +1,5 @@
no-resolv
domain-needed
server=/{{ openshift.common.dns_domain }}/{{ openshift.common.kube_svc_ip }}
+no-negcache
+max-cache-ttl=1
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index 877e88002..9c5103597 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -7,6 +7,12 @@ items:
kind: PersistentVolume
metadata:
name: "{{ volume.name }}"
+{% if volume.labels is defined and volume.labels is mapping %}
+ labels:
+{% for key,value in volume.labels.iteritems() %}
+ {{ key }}: {{ value }}
+{% endfor %}
+{% endif %}
spec:
capacity:
storage: "{{ volume.capacity }}"
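
This pairs with the pvc.j2 selector above: persistent volumes created with matching labels can be claimed by the metrics PVCs. A minimal sketch of a rendered PV, assuming the storage labels are passed through as volume.labels (name, size, and label values illustrative):

apiVersion: v1
kind: PersistentVolume
metadata:
  name: metrics-pv-01            # illustrative
  labels:
    metrics-storage: cassandra
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
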
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 023b1a9b7..8f8550e2d 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -4,7 +4,8 @@
path: /run/ostree-booted
register: ostree_booted
-- block:
+- when: not ostree_booted.stat.exists
+ block:
- name: Ensure libselinux-python is installed
package: name=libselinux-python state=present
@@ -24,41 +25,40 @@
- openshift_additional_repos | length == 0
notify: refresh cache
- # Note: OpenShift repositories under CentOS may be shipped through the
- # "centos-release-openshift-origin" package which configures the repository.
- # This task matches the file names provided by the package so that they are
- # not installed twice in different files and remains idempotent.
- - name: Configure origin gpg keys if needed
- copy:
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- with_items:
- - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
- dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
- - src: origin/repos/openshift-ansible-centos-paas-sig.repo
- dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo
- notify: refresh cache
- when:
- - ansible_os_family == "RedHat"
- - ansible_distribution != "Fedora"
- - openshift_deployment_type == 'origin'
- - openshift_enable_origin_repo | default(true) | bool
-
# Singleton block
- - when: r_osr_first_run | default(true)
+ - when: r_openshift_repos_has_run is not defined
block:
+
+ # Note: OpenShift repositories under CentOS may be shipped through the
+ # "centos-release-openshift-origin" package which configures the repository.
+ # This task matches the file names provided by the package so that they are
+ # not installed twice in different files and remains idempotent.
+ - name: Configure origin gpg keys if needed
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ with_items:
+ - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
+ dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+ - src: origin/repos/openshift-ansible-centos-paas-sig.repo
+ dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo
+ notify: refresh cache
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution != "Fedora"
+ - openshift_deployment_type == 'origin'
+ - openshift_enable_origin_repo | default(true) | bool
+
- name: Ensure clean repo cache in the event repos have been changed manually
debug:
msg: "First run of openshift_repos"
changed_when: true
notify: refresh cache
- - name: Set fact r_osr_first_run false
+ - name: Record that openshift_repos already ran
set_fact:
- r_osr_first_run: false
+ r_openshift_repos_has_run: True
# Force running ALL handlers now, because we expect repo cache to be cleared
# if changes have been made.
- meta: flush_handlers
-
- when: not ostree_booted.stat.exists
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 7b310dbf8..62fc35299 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -1,7 +1,31 @@
OpenShift GlusterFS Cluster
===========================
-OpenShift GlusterFS Cluster Installation
+OpenShift GlusterFS Cluster Configuration
+
+This role handles the configuration of GlusterFS clusters. It can handle
+the following configuration scenarios:
+
+* Configuring a new, natively-hosted GlusterFS cluster. In this scenario,
+ GlusterFS pods are deployed on nodes in the OpenShift cluster which are
+ configured to provide storage.
+* Configuring a new, external GlusterFS cluster. In this scenario, the
+ cluster nodes have the GlusterFS software pre-installed but have not
+ been configured yet. The installer will take care of configuring the
+ cluster(s) for use by OpenShift applications.
+* Using existing GlusterFS clusters. In this scenario, one or more
+  GlusterFS clusters are assumed to be already set up. These clusters can
+ be either natively-hosted or external, but must be managed by a
+ [heketi service](https://github.com/heketi/heketi).
+
+As part of the configuration, a particular GlusterFS cluster may be
+specified to provide backend storage for a natively-hosted Docker
+registry.
+
+Unless configured otherwise, a StorageClass will be automatically
+created for each non-registry GlusterFS cluster. This will allow
+applications which can mount PersistentVolumes to request
+dynamically-provisioned GlusterFS volumes.
Requirements
------------
@@ -21,26 +45,50 @@ hosted Docker registry:
* `[glusterfs_registry]`
+Host Variables
+--------------
+
+For configuring new clusters, the following host variables are available.
+
+Each host in either of the above groups must have the following variable
+defined:
+
+| Name | Default value | Description |
+|-------------------|---------------|-----------------------------------------|
+| glusterfs_devices | None | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, e.g. no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]'
+
+In addition, each host may specify the following variables to further control
+their configuration as GlusterFS nodes:
+
+| Name | Default value | Description |
+|--------------------|---------------------------|-----------------------------------------|
+| glusterfs_cluster | 1 | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace
+| glusterfs_hostname | openshift.common.hostname | A hostname (or IP address) that will be used for internal GlusterFS communication
+| glusterfs_ip | openshift.common.ip | An IP address that will be used by pods to communicate with the GlusterFS node
+| glusterfs_zone | 1 | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. heketi will try to spread each volume's bricks as evenly as possible across all zones
+
Role Variables
--------------
This role has the following variables that control the integration of a
GlusterFS cluster into a new or existing OpenShift cluster:
-| Name | Default value | |
+| Name | Default value | Description |
|--------------------------------------------------|-------------------------|-----------------------------------------|
| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
| openshift_storage_glusterfs_namespace | 'default' | Namespace in which to create GlusterFS resources
| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
-| openshift_storage_glusterfs_nodeselector | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode
+| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
+| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
-| openshift_storage_glusterfs_heketi_admin_key | '' | String to use as secret key for performing heketi commands as admin
-| openshift_storage_glusterfs_heketi_user_key | '' | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode
| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
@@ -52,17 +100,24 @@ registry. These variables start with the prefix
values in their corresponding non-registry variables. The following variables
are an exception:
-| Name | Default value | |
-|---------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default'
-| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters
+| Name | Default value | Description |
+|-------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
Additionally, this role's behavior responds to the following registry-specific
-variable:
-
-| Name | Default value | Description |
-|----------------------------------------------|---------------|------------------------------------------------------------------------------|
-| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+variables:
+
+| Name | Default value | Description |
+|-----------------------------------------------|------------------------------|-----------------------------------------|
+| openshift_hosted_registry_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes
+| openshift_hosted_registry_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage
+| openshift_hosted_registry_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume
+| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume
Dependencies
------------
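
To make the host-variable tables above concrete, a hedged inventory sketch in YAML inventory form (hostnames and devices are illustrative):

glusterfs:
  hosts:
    node11.example.com:
      glusterfs_devices: [ "/dev/sdb" ]
      glusterfs_zone: 1
    node12.example.com:
      glusterfs_devices: [ "/dev/sdb", "/dev/sdc" ]
      glusterfs_zone: 2
    node13.example.com:
      glusterfs_devices: [ "/dev/sdb" ]
      glusterfs_zone: 3
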
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index ebe9ca30b..468877e57 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -2,7 +2,9 @@
openshift_storage_glusterfs_timeout: 300
openshift_storage_glusterfs_namespace: 'default'
openshift_storage_glusterfs_is_native: True
-openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs'
+openshift_storage_glusterfs_name: 'storage'
+openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
+openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_wipe: False
@@ -11,8 +13,8 @@ openshift_storage_glusterfs_heketi_is_missing: True
openshift_storage_glusterfs_heketi_deploy_is_missing: True
openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
openshift_storage_glusterfs_heketi_version: 'latest'
-openshift_storage_glusterfs_heketi_admin_key: ''
-openshift_storage_glusterfs_heketi_user_key: ''
+openshift_storage_glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
openshift_storage_glusterfs_heketi_topology_load: True
openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
openshift_storage_glusterfs_heketi_url: "{{ omit }}"
@@ -20,7 +22,9 @@ openshift_storage_glusterfs_heketi_url: "{{ omit }}"
openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
-openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry'
+openshift_storage_glusterfs_registry_name: 'registry'
+openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
+openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
@@ -29,8 +33,8 @@ openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_gl
openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
-openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
-openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ 32 | oo_generate_secret }}"
openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
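
Because the heketi keys now default to generated secrets (via the oo_generate_secret filter used above), deployments that need stable keys, for example to reuse an existing heketi database, can still pin them in the inventory. A minimal sketch (values illustrative):

openshift_storage_glusterfs_heketi_admin_key: "long-random-admin-key"
openshift_storage_glusterfs_heketi_user_key: "long-random-user-key"
# reuse the same keys for the registry cluster instead of a second generated pair
openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
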
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index c9945be13..81b4fa5dc 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -9,49 +9,47 @@ metadata:
annotations:
description: Bootstrap Heketi installation
tags: glusterfs,heketi,installation
-labels:
- template: deploy-heketi
objects:
- kind: Service
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-service
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
deploy-heketi: support
annotations:
description: Exposes Heketi service
spec:
ports:
- - name: deploy-heketi
+ - name: deploy-heketi-${CLUSTER_NAME}
port: 8080
targetPort: 8080
selector:
- name: deploy-heketi
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
- kind: Route
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-route
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
deploy-heketi: support
spec:
to:
kind: Service
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
- kind: DeploymentConfig
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-dc
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
deploy-heketi: support
annotations:
description: Defines how to deploy Heketi
spec:
replicas: 1
selector:
- name: deploy-heketi
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
triggers:
- type: ConfigChange
strategy:
@@ -60,13 +58,12 @@ objects:
metadata:
name: deploy-heketi
labels:
- name: deploy-heketi
- glusterfs: deploy-heketi-pod
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
deploy-heketi: support
spec:
- serviceAccountName: heketi-service-account
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
containers:
- - name: deploy-heketi
+ - name: heketi
image: ${IMAGE_NAME}:${IMAGE_VERSION}
env:
- name: HEKETI_USER_KEY
@@ -81,11 +78,15 @@ objects:
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
- name: db
mountPath: /var/lib/heketi
+ - name: topology
+ mountPath: ${TOPOLOGY_PATH}
readinessProbe:
timeoutSeconds: 3
initialDelaySeconds: 3
@@ -100,6 +101,9 @@ objects:
port: 8080
volumes:
- name: db
+ - name: topology
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-topology-secret
parameters:
- name: HEKETI_USER_KEY
displayName: Heketi User Secret
@@ -107,9 +111,19 @@ parameters:
- name: HEKETI_ADMIN_KEY
displayName: Heketi Administrator Secret
description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
- name: IMAGE_NAME
- displayName: GlusterFS container name
+ displayName: heketi container name
required: True
- name: IMAGE_VERSION
- displayName: GlusterFS container versiona
+  displayName: heketi container version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: glusterfs
+- name: TOPOLOGY_PATH
+ displayName: heketi topology file location
required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
index c66705752..dc3d2250a 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
@@ -12,24 +12,24 @@ objects:
- kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
- name: glusterfs
+ name: glusterfs-${CLUSTER_NAME}
labels:
- glusterfs: daemonset
+ glusterfs: ${CLUSTER_NAME}-daemonset
annotations:
description: GlusterFS DaemonSet
tags: glusterfs
spec:
selector:
matchLabels:
- glusterfs-node: pod
+ glusterfs: ${CLUSTER_NAME}-pod
template:
metadata:
- name: glusterfs
+ name: glusterfs-${CLUSTER_NAME}
labels:
+ glusterfs: ${CLUSTER_NAME}-pod
glusterfs-node: pod
spec:
- nodeSelector:
- storagenode: glusterfs
+ nodeSelector: "${{NODE_LABELS}}"
hostNetwork: true
containers:
- name: glusterfs
@@ -63,26 +63,26 @@ objects:
privileged: true
readinessProbe:
timeoutSeconds: 3
- initialDelaySeconds: 100
+ initialDelaySeconds: 40
exec:
command:
- "/bin/bash"
- "-c"
- systemctl status glusterd.service
- periodSeconds: 10
+ periodSeconds: 25
successThreshold: 1
- failureThreshold: 3
+ failureThreshold: 15
livenessProbe:
timeoutSeconds: 3
- initialDelaySeconds: 100
+ initialDelaySeconds: 40
exec:
command:
- "/bin/bash"
- "-c"
- systemctl status glusterd.service
- periodSeconds: 10
+ periodSeconds: 25
successThreshold: 1
- failureThreshold: 3
+ failureThreshold: 15
resources: {}
terminationMessagePath: "/dev/termination-log"
volumes:
@@ -120,9 +120,16 @@ objects:
dnsPolicy: ClusterFirst
securityContext: {}
parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+ value: '{ "glusterfs": "storage-host" }'
- name: IMAGE_NAME
displayName: GlusterFS container name
required: True
- name: IMAGE_VERSION
displayName: GlusterFS container versiona
required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
index df045c170..1d8f1abdf 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -8,15 +8,13 @@ metadata:
annotations:
description: Heketi service deployment template
tags: glusterfs,heketi
-labels:
- template: heketi
objects:
- kind: Service
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-service
+ glusterfs: heketi-${CLUSTER_NAME}-service
annotations:
description: Exposes Heketi service
spec:
@@ -25,40 +23,40 @@ objects:
port: 8080
targetPort: 8080
selector:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
- kind: Route
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-route
+ glusterfs: heketi-${CLUSTER_NAME}-route
spec:
to:
kind: Service
- name: heketi
+ name: heketi-${CLUSTER_NAME}
- kind: DeploymentConfig
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-dc
+ glusterfs: heketi-${CLUSTER_NAME}-dc
annotations:
description: Defines how to deploy Heketi
spec:
replicas: 1
selector:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
triggers:
- type: ConfigChange
strategy:
type: Recreate
template:
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
spec:
- serviceAccountName: heketi-service-account
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
containers:
- name: heketi
image: ${IMAGE_NAME}:${IMAGE_VERSION}
@@ -76,6 +74,8 @@ objects:
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
@@ -96,7 +96,7 @@ objects:
volumes:
- name: db
glusterfs:
- endpoints: heketi-storage-endpoints
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
path: heketidbstorage
parameters:
- name: HEKETI_USER_KEY
@@ -105,9 +105,16 @@ parameters:
- name: HEKETI_ADMIN_KEY
displayName: Heketi Administrator Secret
description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
- name: IMAGE_NAME
- displayName: GlusterFS container name
+ displayName: heketi container name
required: True
- name: IMAGE_VERSION
- displayName: GlusterFS container versiona
+  displayName: heketi container version
required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index fa5fa2cb0..829c1f51b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -5,12 +5,6 @@
name: "{{ glusterfs_namespace }}"
when: glusterfs_is_native or glusterfs_heketi_is_native
-- include: glusterfs_deploy.yml
- when: glusterfs_is_native
-
-- name: Make sure heketi-client is installed
- package: name=heketi-client state=present
-
- name: Delete pre-existing heketi resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
@@ -21,12 +15,18 @@
with_items:
- kind: "template,route,service,dc,jobs,secret"
selector: "deploy-heketi"
- - kind: "template,route,service,dc"
- name: "heketi"
- - kind: "svc,ep"
+ - kind: "svc"
name: "heketi-storage-endpoints"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+ - kind: "template,route,service,dc"
+ name: "heketi-{{ glusterfs_name }}"
+ - kind: "svc"
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
- kind: "sa"
- name: "heketi-service-account"
+ name: "heketi-{{ glusterfs_name }}-service-account"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-user-secret"
failed_when: False
when: glusterfs_heketi_wipe
@@ -35,11 +35,11 @@
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=deploy-heketi-pod"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until: "heketi_pod.results.results[0]['items'] | count == 0"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
- name: Wait for heketi pods to terminate
@@ -47,23 +47,26 @@
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until: "heketi_pod.results.results[0]['items'] | count == 0"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
+- include: glusterfs_deploy.yml
+ when: glusterfs_is_native
+
- name: Create heketi service account
oc_serviceaccount:
namespace: "{{ glusterfs_namespace }}"
- name: heketi-service-account
+ name: "heketi-{{ glusterfs_name }}-service-account"
state: present
when: glusterfs_heketi_is_native
- name: Add heketi service account to privileged SCC
oc_adm_policy_user:
- user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: scc
resource_name: privileged
state: present
@@ -71,7 +74,7 @@
- name: Allow heketi service account to view/edit pods
oc_adm_policy_user:
- user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: role
resource_name: edit
state: present
@@ -82,7 +85,7 @@
namespace: "{{ glusterfs_namespace }}"
state: list
kind: pod
- selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
when: glusterfs_heketi_is_native
@@ -100,7 +103,7 @@
namespace: "{{ glusterfs_namespace }}"
state: list
kind: pod
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
when: glusterfs_heketi_is_native
@@ -113,48 +116,35 @@
# heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+- name: Generate topology file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+ dest: "{{ mktemp.stdout }}/topology.json"
+ when:
+ - glusterfs_heketi_topology_load
+
- include: heketi_deploy_part1.yml
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_deploy_is_missing
- glusterfs_heketi_is_missing
-- name: Determine heketi URL
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- state: list
- kind: ep
- selector: "glusterfs in (deploy-heketi-service, heketi-service)"
- register: heketi_url
- until:
- - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
- - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
- delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
- when:
- - glusterfs_heketi_is_native
- - glusterfs_heketi_url is undefined
-
- name: Set heketi URL
set_fact:
- glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ glusterfs_heketi_url: "localhost:8080"
when:
- glusterfs_heketi_is_native
- - glusterfs_heketi_url is undefined
+
+- name: Set heketi-cli command
+ set_fact:
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+ command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
-- name: Generate topology file
- template:
- src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
- dest: "{{ mktemp.stdout }}/topology.json"
- when:
- - glusterfs_heketi_topology_load
-
- name: Load heketi topology
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+ command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
register: topology_load
failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
when:
@@ -164,3 +154,29 @@
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_is_missing
+
+- name: Create heketi user secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-user-secret"
+ type: "kubernetes.io/glusterfs"
+ force: True
+ contents:
+ - path: key
+ data: "{{ glusterfs_heketi_user_key }}"
+
+- name: Generate GlusterFS StorageClass file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+
+- name: Create GlusterFS StorageClass
+ oc_obj:
+ state: present
+ kind: storageclass
+ name: "glusterfs-{{ glusterfs_name }}"
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+ when:
+ - glusterfs_storageclass
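
The StorageClass template itself is outside this hunk; a hedged sketch of the kind of object it presumably renders, built only from names this patch does create (the heketi user secret and the glusterfs-{{ glusterfs_name }} resource name). The provisioner and parameter fields are assumptions, not confirmed by this diff:

kind: StorageClass
apiVersion: storage.k8s.io/v1beta1        # assumption; depends on cluster version
metadata:
  name: glusterfs-storage                 # glusterfs-{{ glusterfs_name }}
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://heketi-storage.example.com:8080"   # illustrative heketi URL
  restuser: "admin"
  secretNamespace: "default"                          # glusterfs_namespace
  secretName: "heketi-storage-user-secret"            # heketi-{{ glusterfs_name }}-user-secret
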
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 451990240..aa303d126 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -3,7 +3,9 @@
glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}"
glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}"
- glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}"
+ glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
+ glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}"
@@ -17,6 +19,6 @@
glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
- glusterfs_nodes: "{{ g_glusterfs_hosts }}"
+ glusterfs_nodes: "{{ groups.glusterfs }}"
- include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 579112349..ea4dcc510 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -1,23 +1,24 @@
---
- assert:
- that: "glusterfs_nodeselector.keys() | count == 1"
- msg: Only one GlusterFS nodeselector key pair should be provided
-
-- assert:
that: "glusterfs_nodes | count >= 3"
msg: There must be at least three GlusterFS nodes specified
- name: Delete pre-existing GlusterFS resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
- kind: "template,daemonset"
- name: glusterfs
+ kind: "{{ item.kind }}"
+ name: "{{ item.name }}"
state: absent
+ with_items:
+ - kind: template
+ name: glusterfs
+ - kind: daemonset
+ name: "glusterfs-{{ glusterfs_name }}"
when: glusterfs_wipe
- name: Unlabel any existing GlusterFS nodes
oc_label:
- name: "{{ item }}"
+ name: "{{ hostvars[item].openshift.common.hostname }}"
kind: node
state: absent
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
@@ -40,11 +41,16 @@
failed_when: False
when: glusterfs_wipe
- # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+ # Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
- name: Clear GlusterFS storage device contents
- shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+ shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
delegate_to: "{{ item.item }}"
with_items: "{{ devices_info.results }}"
+ register: clear_devices
+ until:
+ - "'contains a filesystem in use' not in clear_devices.stderr"
+ delay: 1
+ retries: 30
when:
- glusterfs_wipe
- item.stdout_lines | count > 0
@@ -61,13 +67,11 @@
- name: Label GlusterFS nodes
oc_label:
- name: "{{ glusterfs_host }}"
+ name: "{{ hostvars[item].openshift.common.hostname }}"
kind: node
state: add
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ glusterfs_nodes | default([]) }}"
- loop_control:
- loop_var: glusterfs_host
- name: Copy GlusterFS DaemonSet template
copy:
@@ -78,7 +82,7 @@
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: glusterfs
+ name: "glusterfs"
state: present
files:
- "{{ mktemp.stdout }}/glusterfs-template.yml"
@@ -91,17 +95,19 @@
params:
IMAGE_NAME: "{{ glusterfs_image }}"
IMAGE_VERSION: "{{ glusterfs_version }}"
+ NODE_LABELS: "{{ glusterfs_nodeselector }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
- name: Wait for GlusterFS pods
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs-node=pod"
+ selector: "glusterfs={{ glusterfs_name }}-pod"
register: glusterfs_pods
until:
- "glusterfs_pods.results.results[0]['items'] | count > 0"
# There must be as many pods with 'Ready' staus True as there are nodes expecting those pods
- "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 392f4b65b..4c6891eeb 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -3,7 +3,9 @@
glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}"
glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
- glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}"
+ glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
+ glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}"
@@ -17,21 +19,22 @@
glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}"
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
- glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}"
+ glusterfs_nodes: "{{ groups.glusterfs_registry }}"
- include: glusterfs_common.yml
- when: g_glusterfs_registry_hosts != g_glusterfs_hosts
+ when:
+ - groups.glusterfs_registry | default([]) | count > 0
+ - "'glusterfs' not in groups or groups.glusterfs_registry != groups.glusterfs"
- name: Delete pre-existing GlusterFS registry resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: "{{ item.kind }}"
- name: "{{ item.name | default(omit) }}"
- selector: "{{ item.selector | default(omit) }}"
+ name: "{{ item.name }}"
state: absent
with_items:
- - kind: "svc,ep"
- name: "glusterfs-registry-endpoints"
+ - kind: "svc"
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
failed_when: False
- name: Generate GlusterFS registry endpoints
@@ -40,8 +43,8 @@
dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
- name: Copy GlusterFS registry service
- copy:
- src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml"
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml.j2"
dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
- name: Create GlusterFS registry endpoints
@@ -49,7 +52,7 @@
namespace: "{{ glusterfs_namespace }}"
state: present
kind: endpoints
- name: glusterfs-registry-endpoints
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
files:
- "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
@@ -58,14 +61,14 @@
namespace: "{{ glusterfs_namespace }}"
state: present
kind: service
- name: glusterfs-registry-endpoints
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
files:
- "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
- name: Check if GlusterFS registry volume exists
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list"
+ command: "{{ glusterfs_heketi_client }} volume list"
register: registry_volume
- name: Create GlusterFS registry volume
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+ command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index c14fcfb15..318d34b5d 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -6,11 +6,21 @@
with_items:
- "deploy-heketi-template.yml"
-- name: Create deploy-heketi resources
+- name: Create heketi topology secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+ force: True
+ files:
+ - name: topology.json
+ path: "{{ mktemp.stdout }}/topology.json"
+
+- name: Create deploy-heketi template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: deploy-heketi
+ name: "deploy-heketi"
state: present
files:
- "{{ mktemp.stdout }}/deploy-heketi-template.yml"
@@ -25,17 +35,20 @@
IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
+ TOPOLOGY_PATH: "{{ mktemp.stdout }}"
- name: Wait for deploy-heketi pod
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until:
- "heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 64410a9ab..3a9619d9d 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,8 +1,10 @@
---
- name: Create heketi DB volume
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+ command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
register: setup_storage
- failed_when: False
+
+- name: Copy heketi-storage list
+ shell: "{{ openshift.common.client_binary }} rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
@@ -28,7 +30,7 @@
# Pod's 'Complete' status must be True
- "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
failed_when:
- "'results' in heketi_job.results"
- "heketi_job.results.results | count > 0"
@@ -46,14 +48,45 @@
with_items:
- kind: "template,route,service,jobs,dc,secret"
selector: "deploy-heketi"
- failed_when: False
+ - kind: "svc"
+ name: "heketi-storage-endpoints"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+
+- name: Generate heketi endpoints
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi-endpoints.yml.j2"
+ dest: "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Generate heketi service
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi-service.yml.j2"
+ dest: "{{ mktemp.stdout }}/heketi-service.yml"
+
+- name: Create heketi endpoints
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: endpoints
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Create heketi service
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: service
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/heketi-service.yml"
- name: Copy heketi template
copy:
src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
dest: "{{ mktemp.stdout }}/heketi-template.yml"
-- name: Create heketi resources
+- name: Create heketi template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
@@ -72,38 +105,27 @@
IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
- name: Wait for heketi pod
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until:
- "heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
-
-- name: Determine heketi URL
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- state: list
- kind: ep
- selector: "glusterfs=heketi-service"
- register: heketi_url
- until:
- - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
- - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
- delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
-- name: Set heketi URL
+- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+ command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
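
For reference, the glusterfs_heketi_client fact set above resolves to one of two command prefixes depending on glusterfs_heketi_is_native; a minimal sketch of the expansion, where the pod name and admin key are placeholders rather than values from this patch:

# Hedged sketch of the fact's expansion.
- hosts: localhost
  gather_facts: false
  vars:
    glusterfs_heketi_is_native: true
    heketi_pod_name: heketi-storage-1-abcde      # hypothetical pod name
    glusterfs_heketi_admin_key: ADMIN_KEY        # hypothetical secret
  tasks:
    - set_fact:
        glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod_name }} {% endif %}heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
    - debug:
        var: glusterfs_heketi_client
      # native heketi:   oc rsh heketi-storage-1-abcde heketi-cli -s http://localhost:8080 --user admin --secret 'ADMIN_KEY'
      # external heketi: heketi-cli -s http://localhost:8080 --user admin --secret 'ADMIN_KEY'
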
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index ebd8db453..c9bfdd1cd 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -7,12 +7,11 @@
- include: glusterfs_config.yml
when:
- - g_glusterfs_hosts | default([]) | count > 0
+ - groups.glusterfs | default([]) | count > 0
- include: glusterfs_registry.yml
when:
- - g_glusterfs_registry_hosts | default([]) | count > 0
- - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
+ - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
- name: Delete temp directory
file:
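
The include conditions above now key off the glusterfs and glusterfs_registry inventory groups rather than the old g_glusterfs_* variables. A minimal sketch of such groups in YAML inventory form; host names are placeholders, and the repo's example inventories use INI rather than this layout.

# Hedged sketch: inventory groups the includes above test for.
all:
  children:
    glusterfs:
      hosts:
        node1.example.com:
        node2.example.com:
        node3.example.com:
    glusterfs_registry:
      hosts:
        node4.example.com:
        node5.example.com:
        node6.example.com:
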
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
index 605627ab5..11c9195bb 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -1,7 +1,8 @@
+---
apiVersion: v1
kind: Endpoints
metadata:
- name: glusterfs-registry-endpoints
+ name: glusterfs-{{ glusterfs_name }}-endpoints
subsets:
- addresses:
{% for node in glusterfs_nodes %}
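
Rendered against a hypothetical two-node glusterfs_nodes list with glusterfs_name set to "registry", the template above would yield roughly the following; the IPs are illustrative, and the trailing ports stanza is assumed to mirror the heketi endpoints template later in this patch.

# Hedged rendering of the endpoints template.
---
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-registry-endpoints
subsets:
- addresses:
  - ip: 192.0.2.11
  - ip: 192.0.2.12
  ports:
  - port: 1
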
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2
index 3f8d8f507..3f869d2b7 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Service
metadata:
- name: glusterfs-registry-endpoints
+ name: glusterfs-{{ glusterfs_name }}-endpoints
spec:
ports:
- port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..9b8fae310
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{{ glusterfs_heketi_url }}:8081"
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-user-secret"
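
A claim against this class would then be dynamically provisioned through heketi; a minimal sketch, assuming glusterfs_name is "registry" and using a hypothetical claim name and size:

# Hedged example: PVC consuming the StorageClass above; name and size are placeholders.
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-claim
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: glusterfs-registry
  resources:
    requests:
      storage: 10Gi
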
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
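
The Service above exposes only a dummy port so that its matching Endpoints object is retained; pods then mount the heketi DB volume by referencing those endpoints. A hedged sketch follows, assuming glusterfs_name is "storage", a placeholder image, and "heketidbstorage" as the volume that heketi's setup-openshift-heketi-storage step conventionally creates.

# Hedged sketch: pod mounting the heketi DB via the endpoints/service pair above.
---
apiVersion: v1
kind: Pod
metadata:
  name: heketi-db-example
spec:
  containers:
  - name: example
    image: registry.example.com/example:latest   # placeholder image
    volumeMounts:
    - name: db
      mountPath: /var/lib/heketi
  volumes:
  - name: db
    glusterfs:
      endpoints: heketi-db-storage-endpoints
      path: heketidbstorage
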