-rw-r--r--  .tito/packages/openshift-ansible | 2
-rwxr-xr-x  hack/build-images.sh | 20
-rwxr-xr-x  hack/push-release.sh | 56
-rw-r--r--  images/installer/system-container/root/exports/manifest.json | 2
-rw-r--r--  inventory/byo/hosts.origin.example | 16
-rw-r--r--  inventory/byo/hosts.ose.example | 16
-rw-r--r--  openshift-ansible.spec | 56
-rw-r--r--  playbooks/adhoc/uninstall.yml | 1
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 18
-rw-r--r--  playbooks/byo/openshift-cluster/service-catalog.yml | 12
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 5
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 25
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 45
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 1
-rw-r--r--  playbooks/common/openshift-etcd/restart.yml | 2
-rw-r--r--  playbooks/common/openshift-master/config.yml | 32
-rw-r--r--  roles/ansible_service_broker/defaults/main.yml | 6
-rw-r--r--  roles/ansible_service_broker/meta/main.yml | 15
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 268
-rw-r--r--  roles/ansible_service_broker/tasks/main.yml | 8
-rw-r--r--  roles/ansible_service_broker/tasks/remove.yml | 65
-rw-r--r--  roles/ansible_service_broker/tasks/validate_facts.yml | 15
-rw-r--r--  roles/ansible_service_broker/vars/default_images.yml | 13
-rw-r--r--  roles/ansible_service_broker/vars/openshift-enterprise.yml | 13
-rw-r--r--  roles/etcd/tasks/system_container.yml | 13
-rw-r--r--  roles/etcd_common/defaults/main.yml | 2
-rw-r--r--  roles/lib_openshift/library/oc_atomic_container.py | 13
-rw-r--r--  roles/lib_openshift/src/ansible/oc_atomic_container.py | 13
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 21
-rw-r--r--  roles/openshift_certificate_expiry/test/conftest.py | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml | 210
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml | 58
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml | 254
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json | 45
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json | 45
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md | 1
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json | 274
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json | 7
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json | 7
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json | 11
-rw-r--r--  roles/openshift_facts/defaults/main.yml | 2
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 25
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 8
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 2
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 10
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 | 3
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 3
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/pvc.j2 | 3
-rw-r--r--  roles/openshift_master/README.md | 25
-rw-r--r--  roles/openshift_master/defaults/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 23
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 3
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 8
-rw-r--r--  roles/openshift_master/templates/master_docker/master.docker.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 3
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 3
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 3
-rw-r--r--  roles/openshift_metrics/templates/pvc.j2 | 3
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 2
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 3
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 3
-rw-r--r--  roles/openshift_service_catalog/defaults/main.yml | 3
-rw-r--r--  roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml | 161
-rw-r--r--  roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml | 38
-rw-r--r--  roles/openshift_service_catalog/meta/main.yml | 17
-rw-r--r--  roles/openshift_service_catalog/tasks/generate_certs.yml | 70
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 181
-rw-r--r--  roles/openshift_service_catalog/tasks/main.yml | 8
-rw-r--r--  roles/openshift_service_catalog/tasks/remove.yml | 56
-rw-r--r--  roles/openshift_service_catalog/tasks/start_api_server.yml | 22
-rw-r--r--  roles/openshift_service_catalog/tasks/wire_aggregator.yml | 86
-rw-r--r--  roles/openshift_service_catalog/templates/api_server.j2 | 80
-rw-r--r--  roles/openshift_service_catalog/templates/api_server_route.j2 | 14
-rw-r--r--  roles/openshift_service_catalog/templates/api_server_service.j2 | 13
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager.j2 | 46
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager_service.j2 | 13
-rw-r--r--  roles/openshift_service_catalog/vars/default_images.yml | 3
-rw-r--r--  roles/openshift_service_catalog/vars/openshift-enterprise.yml | 3
-rw-r--r--  roles/openshift_version/tasks/main.yml | 196
-rw-r--r--  test/integration/openshift_health_checker/setup_container.yml | 3
111 files changed, 2705 insertions, 389 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 98e277c19..a83752c29 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.117-1 ./
+3.6.123-1 ./
diff --git a/hack/build-images.sh b/hack/build-images.sh
index ce421178f..6e6d360bf 100755
--- a/hack/build-images.sh
+++ b/hack/build-images.sh
@@ -47,7 +47,7 @@ if [ "$help" = true ]; then
echo " default: openshift/origin-ansible"
echo
echo " --version=VERSION"
- echo " The version used to tag the image"
+ echo " The version used to tag the image (can be a comma-separated list)"
echo " default: latest"
echo
echo " --no-cache"
@@ -62,25 +62,33 @@ if [ "$help" = true ]; then
exit 0
fi
+
if [ "$verbose" = true ]; then
set -x
fi
BUILD_STARTTIME=$(date +%s)
comp_path=$source_root/
-docker_tag=${prefix}:${version}
+
+# turn comma-separated versions into -t args for docker build
+IFS=',' read -r -a version_arr <<< "$version"
+docker_tags=()
+for tag in "${version_arr[@]}"; do
+ docker_tags+=("-t" "${prefix}:${tag}")
+done
+
echo
echo
-echo "--- Building component '$comp_path' with docker tag '$docker_tag' ---"
-docker build ${options} -t $docker_tag $comp_path
-BUILD_ENDTIME=$(date +%s); echo "--- $docker_tag took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---"
+echo "--- Building component '$comp_path' with docker tag(s) '$version' ---"
+docker build ${options} "${docker_tags[@]}" $comp_path
+BUILD_ENDTIME=$(date +%s); echo "--- ${version} took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---"
echo
echo
echo
echo
echo "++ Active images"
-docker images | grep ${prefix} | grep ${version} | sort
+docker images | grep ${prefix} | sort
echo
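
With the change above, hack/build-images.sh turns a comma-separated --version value into multiple -t arguments, so a single build produces several tags. A usage sketch, assuming the script is run from the repository root with the default openshift/origin-ansible prefix:

    # one build, tagged as both openshift/origin-ansible:latest and openshift/origin-ansible:v3.6
    ./hack/build-images.sh --version=latest,v3.6
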
diff --git a/hack/push-release.sh b/hack/push-release.sh
index 131ed83ca..1f41ab179 100755
--- a/hack/push-release.sh
+++ b/hack/push-release.sh
@@ -1,55 +1,41 @@
#!/bin/bash
-# This script pushes all of the built images to a registry.
+# This script pushes a built image to a registry.
#
-# Set OS_PUSH_BASE_REGISTRY to prefix the destination images
+# Set OS_PUSH_BASE_REGISTRY to prefix the destination images e.g.
+# OS_PUSH_BASE_REGISTRY="docker.io/"
#
+# Set OS_PUSH_TAG with a comma-separated list for pushing same image
+# to multiple tags e.g.
+# OS_PUSH_TAG="latest,v3.6"
set -o errexit
set -o nounset
set -o pipefail
-STARTTIME=$(date +%s)
-OS_ROOT=$(dirname "${BASH_SOURCE}")/..
+starttime=$(date +%s)
-PREFIX="${PREFIX:-openshift/origin-ansible}"
+# image name without repo or tag.
+image="${PREFIX:-openshift/origin-ansible}"
-# Go to the top of the tree.
-cd "${OS_ROOT}"
+# existing local tag on the image we want to push
+source_tag="${OS_TAG:-latest}"
-# Allow a release to be repushed with a tag
-tag="${OS_PUSH_TAG:-}"
-if [[ -n "${tag}" ]]; then
- tag=":${tag}"
-else
- tag=":latest"
-fi
-
-# Source tag
-source_tag="${OS_TAG:-}"
-if [[ -z "${source_tag}" ]]; then
- source_tag="latest"
-fi
-
-images=(
- ${PREFIX}
-)
+# Enable retagging a build with one or more tags for push
+IFS=',' read -r -a push_tags <<< "${OS_PUSH_TAG:-latest}"
+registry="${OS_PUSH_BASE_REGISTRY:-}"
+# force push if available
PUSH_OPTS=""
if docker push --help | grep -q force; then
PUSH_OPTS="--force"
fi
-if [[ "${OS_PUSH_BASE_REGISTRY-}" != "" || "${tag}" != "" ]]; then
- set -e
- for image in "${images[@]}"; do
- docker tag "${image}:${source_tag}" "${OS_PUSH_BASE_REGISTRY-}${image}${tag}"
- done
- set +e
-fi
-
-for image in "${images[@]}"; do
- docker push ${PUSH_OPTS} "${OS_PUSH_BASE_REGISTRY-}${image}${tag}"
+set -x
+for tag in "${push_tags[@]}"; do
+ docker tag "${image}:${source_tag}" "${registry}${image}:${tag}"
+ docker push ${PUSH_OPTS} "${registry}${image}:${tag}"
done
+set +x
-ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
+endtime=$(date +%s); echo "$0 took $(($endtime - $starttime)) seconds"; exit 0
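
The rewritten push script is driven entirely by environment variables: OS_TAG names the existing local tag (default latest), OS_PUSH_TAG lists the destination tags, and OS_PUSH_BASE_REGISTRY prefixes the destination image. A sketch with the illustrative values from the header comments:

    # retag the local openshift/origin-ansible:latest and push it as :latest and :v3.6 to docker.io
    OS_PUSH_BASE_REGISTRY="docker.io/" OS_PUSH_TAG="latest,v3.6" ./hack/push-release.sh
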
diff --git a/images/installer/system-container/root/exports/manifest.json b/images/installer/system-container/root/exports/manifest.json
index 321a84ee8..8b984d7a3 100644
--- a/images/installer/system-container/root/exports/manifest.json
+++ b/images/installer/system-container/root/exports/manifest.json
@@ -6,7 +6,7 @@
"VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
"PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
"HOME_ROOT": "/root",
- "ANSIBLE_CONFIG": "/usr/share/ansible/openshift-ansible/ansible.cfg",
+ "ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg",
"INVENTORY_FILE": "/dev/null"
}
}
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 962a01a91..b38c6e6b6 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -42,6 +42,17 @@ openshift_release=v3.6
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
#openshift_pkg_version=-3.6.0
+# This enables all the system containers except for docker:
+#openshift_use_system_containers=False
+#
+# But you can choose separately each component that must be a
+# system container:
+#
+#openshift_use_openvswitch_system_container=False
+#openshift_use_node_system_container=False
+#openshift_use_master_system_container=False
+#openshift_use_etcd_system_container=False
+
# Install the openshift examples
#openshift_install_examples=true
@@ -798,7 +809,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# use this line.
# The directory in "auditFilePath" will be created if it's not
# exist
-#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}}
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
# by deployment_type=origin
@@ -815,6 +826,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Controls validity for etcd CA, peer, server and client certificates.
#
#etcd_ca_default_days=1825
+#
+# ServiceAccountConfig:LimitSecretRefences rejects pods that reference secrets their service accounts do not reference
+# openshift_master_saconfig_limitsecretreferences=false
# Upgrade Control
#
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 63f1f00d2..e5e9c7342 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -42,6 +42,17 @@ openshift_release=v3.6
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
#openshift_pkg_version=-3.6.0
+# This enables all the system containers except for docker:
+#openshift_use_system_containers=False
+#
+# But you can choose separately each component that must be a
+# system container:
+#
+#openshift_use_openvswitch_system_container=False
+#openshift_use_node_system_container=False
+#openshift_use_master_system_container=False
+#openshift_use_etcd_system_container=False
+
# Install the openshift examples
#openshift_install_examples=true
@@ -798,7 +809,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# use this line.
# The directory in "auditFilePath" will be created if it's not
# exist
-#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}}
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
@@ -811,6 +822,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Controls validity for etcd CA, peer, server and client certificates.
#
#etcd_ca_default_days=1825
+#
+# ServiceAccountConfig:LimitSecretRefences rejects pods that reference secrets their service accounts do not reference
+# openshift_master_saconfig_limitsecretreferences=false
# Upgrade Control
#
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 7b5587294..1fcc9990c 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.117
+Version: 3.6.123.1000
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -280,6 +280,60 @@ Atomic OpenShift Utilities includes
%changelog
+* Fri Jun 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.123-1
+- releases: enable build/push with multiple tags (lmeyer@redhat.com)
+- Update template examples for 3.6 (rteague@redhat.com)
+- Reverting v prefix introduced by stagecut (smunilla@redhat.com)
+- Fixed readme doc. (kwoodson@redhat.com)
+- Adding version field for stagecut (smunilla@redhat.com)
+- Remove package_update from install playbook (rhcarvalho@gmail.com)
+- Restart NetworkManager only if dnsmasq was used
+ (bliemli@users.noreply.github.com)
+- remove extra close brace in example inventory (gpei@redhat.com)
+- Adding option for serviceAccountConfig.limitSecretReferences
+ (kwoodson@redhat.com)
+- doc: Add system_container examples to inventory (smilner@redhat.com)
+- system_containers: Add openshift_ to other system_container vars
+ (smilner@redhat.com)
+- system_containers: Add openshift_ to use_system_containers var
+ (smilner@redhat.com)
+- detect etcd service name based on etcd runtime when restarting
+ (jchaloup@redhat.com)
+- set proper etcd_data_dir for system container (jchaloup@redhat.com)
+- etcd, system_container: do not mask etcd_container (gscrivan@redhat.com)
+- etcd, system_container: do not enable system etcd (gscrivan@redhat.com)
+- oc_atomic_container: Require 1.17.2 (smilner@redhat.com)
+- Verify matched openshift_upgrade_nodes_label (rteague@redhat.com)
+- bug 1457642. Use same SG index to avoid seeding timeout (jcantril@redhat.com)
+
+* Wed Jun 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.122-1
+-
+
+* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.121-1
+- Updating default from null to "" (ewolinet@redhat.com)
+
+* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.120-1
+- Update atomic-openshift-master.j2 (sdodson@redhat.com)
+- Enable push to registry via dns only on clean 3.6 installs
+ (sdodson@redhat.com)
+- Disable actually pushing to the registry via dns for now (sdodson@redhat.com)
+- Add openshift_node_dnsmasq role to upgrade (sdodson@redhat.com)
+- Push to the registry via dns (sdodson@redhat.com)
+
+* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.119-1
+- Temporarilly only migrate jobs as we were before (sdodson@redhat.com)
+- Disable TLS verification in skopeo inspect (rhcarvalho@gmail.com)
+- Preserve etcd3 storage if it's already in use (sdodson@redhat.com)
+- GlusterFS: Generate better secret keys (jarrpa@redhat.com)
+- GlusterFS: Fix error when groups.glusterfs_registry is undefined.
+ (jarrpa@redhat.com)
+- GlusterFS: Use proper identity in heketi secret (jarrpa@redhat.com)
+- GlusterFS: Allow configuration of heketi port (jarrpa@redhat.com)
+- GlusterFS: Fix variable typo (jarrpa@redhat.com)
+- GlusterFS: Minor template fixes (jarrpa@redhat.com)
+- registry: mount GlusterFS storage volume from correct host
+ (jarrpa@redhat.com)
+
* Mon Jun 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.117-1
- Run storage upgrade pre and post master upgrade (rteague@redhat.com)
- Introduce etcd migrate role (jchaloup@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 27c3a9edd..ddd2ecebd 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -317,6 +317,7 @@
- name: restart NetworkManager
service: name=NetworkManager state=restarted
+ when: openshift_use_dnsmasq | default(true) | bool
- hosts: masters
become: yes
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 2372a5322..acf5469bf 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -3,24 +3,6 @@
tags:
- always
-- name: Verify Requirements
- hosts: OSEv3
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: "install"
- post_tasks:
- - action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - package_availability
- - package_update
- - package_version
- - docker_image_availability
- - docker_storage
-
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml
new file mode 100644
index 000000000..a9fc18958
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/service-catalog.yml
@@ -0,0 +1,12 @@
+---
+#
+# This playbook is a preview of upcoming changes for installing
+# Hosted logging on. See inventory/byo/hosts.*.example for the
+# currently supported method.
+#
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/service_catalog.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: "{{ debug_level | default(2) }}"
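
The new playbook can be run on its own against an existing cluster; a sketch, assuming a BYO inventory at ~/inventory (the path is only an example):

    ansible-playbook -i ~/inventory playbooks/byo/openshift-cluster/service-catalog.yml
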
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 1bee460e8..c7766ff04 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,4 +1,9 @@
---
+# openshift_health_checker depends on openshift_version which now requires group eval.
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
- name: Run OpenShift health checks
hosts: OSEv3
roles:
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index e01c6f38d..7ca9f7e8b 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,4 +1,9 @@
---
+# openshift_health_checker depends on openshift_version which now requires group eval.
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
- hosts: OSEv3
name: run OpenShift pre-install checks
roles:
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 1482b3a3f..7224ae712 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,4 +1,23 @@
---
+# TODO: refactor this into its own include
+# and pass a variable for ctx
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: "install"
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
- include: initialize_oo_option_facts.yml
tags:
- always
@@ -45,6 +64,12 @@
tags:
- hosted
+- include: service_catalog.yml
+ when:
+ - openshift_enable_service_catalog | default(false) | bool
+ tags:
+ - servicecatalog
+
- name: Re-enable excluder if it was previously enabled
hosts: oo_masters_to_config:oo_nodes_to_config
tags:
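
Because the service_catalog.yml include added above is gated on openshift_enable_service_catalog (default false), the catalog can also be pulled in during a normal cluster config run; a sketch, assuming the flag is passed as an extra var rather than set in the inventory:

    ansible-playbook -i ~/inventory playbooks/byo/openshift-cluster/config.yml -e openshift_enable_service_catalog=true
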
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
new file mode 100644
index 000000000..c42e8781a
--- /dev/null
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -0,0 +1,8 @@
+---
+- include: evaluate_groups.yml
+
+- name: Service Catalog
+ hosts: oo_first_master
+ roles:
+ - openshift_service_catalog
+ - ansible_service_broker
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 046535680..72de63070 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -6,27 +6,32 @@
- lib_openshift
tasks:
- - name: Retrieve list of openshift nodes matching upgrade label
- oc_obj:
- state: list
- kind: node
- selector: "{{ openshift_upgrade_nodes_label }}"
- register: nodes_to_upgrade
- when: openshift_upgrade_nodes_label is defined
+ - when: openshift_upgrade_nodes_label is defined
+ block:
+ - name: Retrieve list of openshift nodes matching upgrade label
+ oc_obj:
+ state: list
+ kind: node
+ selector: "{{ openshift_upgrade_nodes_label }}"
+ register: nodes_to_upgrade
- # We got a list of nodes with the label, now we need to match these with inventory hosts
- # using their openshift.common.hostname fact.
- - name: Map labelled nodes to inventory hosts
- add_host:
- name: "{{ item }}"
- groups: temp_nodes_to_upgrade
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: " {{ groups['oo_nodes_to_config'] }}"
- when:
- - openshift_upgrade_nodes_label is defined
- - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
- changed_when: false
+ - name: Fail if no nodes match openshift_upgrade_nodes_label
+ fail:
+ msg: "openshift_upgrade_nodes_label was specified but no nodes matched"
+ when: nodes_to_upgrade.results.results[0]['items'] | length == 0
+
+ # We got a list of nodes with the label, now we need to match these with inventory hosts
+ # using their openshift.common.hostname fact.
+ - name: Map labelled nodes to inventory hosts
+ add_host:
+ name: "{{ item }}"
+ groups: temp_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: " {{ groups['oo_nodes_to_config'] }}"
+ when:
+ - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+ changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
# present, otherwise hit all nodes:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 5c19df4c5..6738ce11f 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -296,6 +296,7 @@
- openshift_facts
- docker
- openshift_node_upgrade
+ - openshift_node_dnsmasq
post_tasks:
- name: Set node schedulability
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 91dbc2cd4..35a50cf4e 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -34,6 +34,7 @@
- openshift_facts
- docker
- openshift_node_upgrade
+ - openshift_node_dnsmasq
- role: openshift_excluder
r_openshift_excluder_action: enable
r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml
index 196c86f28..af1ef245a 100644
--- a/playbooks/common/openshift-etcd/restart.yml
+++ b/playbooks/common/openshift-etcd/restart.yml
@@ -5,5 +5,5 @@
tasks:
- name: restart etcd
service:
- name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}"
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
state: restarted
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 429460b2c..7d3a371e3 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -20,15 +20,6 @@
- node
- .config_managed
- - name: Check for existing configuration
- stat:
- path: /etc/origin/master/master-config.yaml
- register: master_config_stat
-
- - name: Set clean install fact
- set_fact:
- l_clean_install: "{{ not master_config_stat.stat.exists }}"
-
- set_fact:
openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}"
when: openshift_master_pod_eviction_timeout is not defined
@@ -78,7 +69,7 @@
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-- name: Determine if session secrets must be generated
+- name: Inspect state of first master session secrets and config
hosts: oo_first_master
roles:
- role: openshift_facts
@@ -88,6 +79,24 @@
local_facts:
session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+ - name: Check for existing configuration
+ stat:
+ path: /etc/origin/master/master-config.yaml
+ register: master_config_stat
+
+ - name: Set clean install fact
+ set_fact:
+ l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
+
+ - name: Determine if etcd3 storage is in use
+ command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
+ register: etcd3_grep
+ failed_when: false
+ changed_when: false
+
+ - name: Set etcd3 fact
+ set_fact:
+ l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
- name: Generate master session secrets
hosts: oo_first_master
@@ -131,7 +140,8 @@
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
- r_openshift_master_clean_install: hostvars[groups.oo_first_master.0].l_clean_install
+ r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
+ r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
- role: nuage_master
when: openshift.common.use_nuage | bool
- role: calico_master
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
new file mode 100644
index 000000000..4a7252679
--- /dev/null
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+
+ansible_service_broker_remove: false
+ansible_service_broker_log_level: info
+# Recommended you do not enable this for now
+ansible_service_broker_launch_apb_on_bind: false
diff --git a/roles/ansible_service_broker/meta/main.yml b/roles/ansible_service_broker/meta/main.yml
new file mode 100644
index 000000000..ec4aafb79
--- /dev/null
+++ b/roles/ansible_service_broker/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Fabian von Feilitzsch
+ description: OpenShift Ansible Service Broker
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.1
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
new file mode 100644
index 000000000..b48583fd4
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -0,0 +1,268 @@
+---
+
+# Fact setting and validations
+- name: Set default image variables based on deployment type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: set ansible_service_broker facts
+ set_fact:
+ ansible_service_broker_image_prefix: "{{ ansible_service_broker_image_prefix | default(__ansible_service_broker_image_prefix) }}"
+ ansible_service_broker_image_tag: "{{ ansible_service_broker_image_tag | default(__ansible_service_broker_image_tag) }}"
+
+ ansible_service_broker_etcd_image_prefix: "{{ ansible_service_broker_etcd_image_prefix | default(__ansible_service_broker_etcd_image_prefix) }}"
+ ansible_service_broker_etcd_image_tag: "{{ ansible_service_broker_etcd_image_tag | default(__ansible_service_broker_etcd_image_tag) }}"
+
+ ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}"
+ ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}"
+ ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}"
+ ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}"
+ ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
+
+- name: set ansible-service-broker image facts using set prefix and tag
+ set_fact:
+ ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
+ ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
+
+- include: validate_facts.yml
+
+
+# Deployment of ansible-service-broker starts here
+- name: create openshift-ansible-service-broker project
+ oc_project:
+ name: openshift-ansible-service-broker
+ state: present
+
+- name: create ansible-service-broker serviceaccount
+ oc_serviceaccount:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+
+- name: create ansible-service-broker service
+ oc_service:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+ labels:
+ app: ansible-service-broker
+ service: asb
+ ports:
+ - name: port-1338
+ port: 1338
+ selector:
+ app: ansible-service-broker
+ service: asb
+
+- name: create etcd service
+ oc_service:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ ports:
+ - name: etcd-advertise
+ port: 2379
+ selector:
+ app: ansible-service-broker
+ service: etcd
+
+- name: create route for ansible-service-broker service
+ oc_route:
+ name: asb-1338
+ namespace: openshift-ansible-service-broker
+ state: present
+ service_name: asb
+ port: 1338
+ register: asb_route_out
+
+- name: get ansible-service-broker route name
+ set_fact:
+ ansible_service_broker_route: "{{ asb_route_out.results.results[0].spec.host }}"
+
+- name: create persistent volume claim for etcd
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: PersistentVolumeClaim
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+
+- name: create etcd deployment
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: Deployment
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: ansible-service-broker
+ service: etcd
+ spec:
+ selector:
+ matchLabels:
+ app: ansible-service-broker
+ service: etcd
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 1
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: ansible-service-broker
+ service: etcd
+ spec:
+ restartPolicy: Always
+ containers:
+ - image: "{{ ansible_service_broker_etcd_image }}"
+ name: etcd
+ imagePullPolicy: IfNotPresent
+ terminationMessagePath: /tmp/termination-log
+ workingDir: /etcd
+ args:
+ - /usr/local/bin/etcd
+ - --data-dir=/data
+ - --listen-client-urls="http://0.0.0.0:2379"
+ - --advertise-client-urls="http://0.0.0.0:2379"
+ ports:
+ - containerPort: 2379
+ protocol: TCP
+ env:
+ - name: ETCDCTL_API
+ value: "3"
+ volumeMounts:
+ - mountPath: /data
+ name: etcd
+ volumes:
+ - name: etcd
+ persistentVolumeClaim:
+ claimName: etcd
+
+- name: create ansible-service-broker deployment
+ oc_obj:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: Deployment
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: openshift-ansible-service-broker
+ service: asb
+ spec:
+ strategy:
+ type: Recreate
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: openshift-ansible-service-broker
+ service: asb
+ spec:
+ serviceAccount: asb
+ restartPolicy: Always
+ containers:
+ - image: "{{ ansible_service_broker_image }}"
+ name: asb
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/ansible-service-broker
+ ports:
+ - containerPort: 1338
+ protocol: TCP
+ env:
+ - name: BROKER_CONFIG
+ value: /etc/ansible-service-broker/config.yaml
+ terminationMessagePath: /tmp/termination-log
+ volumes:
+ - name: config-volume
+ configMap:
+ name: broker-config
+ items:
+ - key: broker-config
+ path: config.yaml
+
+
+# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
+- name: Create config map for ansible-service-broker
+ oc_obj:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: ConfigMap
+ content:
+ path: /tmp/cmout
+ data:
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: ansible-service-broker
+ data:
+ broker-config: |
+ registry:
+ name: "{{ ansible_service_broker_registry_type }}"
+ url: "{{ ansible_service_broker_registry_url }}"
+ user: "{{ ansible_service_broker_registry_user }}"
+ pass: "{{ ansible_service_broker_registry_password }}"
+ org: "{{ ansible_service_broker_registry_organization }}"
+ dao:
+ etcd_host: etcd
+ etcd_port: 2379
+ log:
+ logfile: /var/log/ansible-service-broker/asb.log
+ stdout: true
+ level: "{{ ansible_service_broker_log_level }}"
+ color: true
+ openshift: {}
+ broker:
+ devbroker: false
+ launchapbonbind: "{{ ansible_service_broker_launch_apb_on_bind }}"
+
+- name: Create the Broker resource in the catalog
+ oc_obj:
+ name: ansible-service-broker
+ state: present
+ kind: Broker
+ content:
+ path: /tmp/brokerout
+ data:
+ apiVersion: servicecatalog.k8s.io/v1alpha1
+ kind: Broker
+ metadata:
+ name: ansible-service-broker
+ spec:
+ url: http://{{ ansible_service_broker_route }}
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
new file mode 100644
index 000000000..b46ce8233
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include: install.yml
+ when: not ansible_service_broker_remove|default(false) | bool
+
+- include: remove.yml
+ when: ansible_service_broker_remove|default(false) | bool
diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml
new file mode 100644
index 000000000..2519f9f4c
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/remove.yml
@@ -0,0 +1,65 @@
+---
+
+- name: remove openshift-ansible-service-broker project
+ oc_project:
+ name: openshift-ansible-service-broker
+ state: absent
+
+- name: remove ansible-service-broker serviceaccount
+ oc_serviceaccount:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove ansible-service-broker service
+ oc_service:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove etcd service
+ oc_service:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove route for ansible-service-broker service
+ oc_route:
+ name: asb-1338
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove persistent volume claim for etcd
+ oc_pvc:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove etcd deployment
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: Deployment
+
+- name: remove ansible-service-broker deployment
+ oc_obj:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: Deployment
+
+# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
+- name: remove config map for ansible-service-broker
+ oc_obj:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: ConfigMap
+
+# TODO: Is this going to work?
+- name: remove broker object from the catalog
+ oc_obj:
+ name: ansible-service-broker
+ state: absent
+ kind: Broker
diff --git a/roles/ansible_service_broker/tasks/validate_facts.yml b/roles/ansible_service_broker/tasks/validate_facts.yml
new file mode 100644
index 000000000..604d24e1d
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/validate_facts.yml
@@ -0,0 +1,15 @@
+---
+- name: validate Dockerhub registry settings
+ fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_user. ansible_service_broker_registry_password, and ansible_service_broker_registry_organization parameters"
+ when:
+ - ansible_service_broker_registry_type == 'dockerhub'
+ - not (ansible_service_broker_registry_user and
+ ansible_service_broker_registry_password and
+ ansible_service_broker_registry_organization)
+
+
+- name: validate RHCC registry settings
+ fail: msg="To use the Red Hat Container Catalog registry, you must provide the ansible_service_broker_registry_url"
+ when:
+ - ansible_service_broker_registry_type == 'rhcc'
+ - not ansible_service_broker_registry_url
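
Given these checks, a dockerhub-backed broker (the default registry type in default_images.yml below) needs a user, password, and organization before the role will proceed. A sketch of supplying them as extra vars; the placeholder values and inventory path are not real:

    ansible-playbook -i ~/inventory playbooks/byo/openshift-cluster/service-catalog.yml \
      -e ansible_service_broker_registry_user=<user> \
      -e ansible_service_broker_registry_password=<password> \
      -e ansible_service_broker_registry_organization=<org>
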
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
new file mode 100644
index 000000000..b0b3835e3
--- /dev/null
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -0,0 +1,13 @@
+---
+
+__ansible_service_broker_image_prefix: ansibleplaybookbundle/
+__ansible_service_broker_image_tag: latest
+
+__ansible_service_broker_etcd_image_prefix: quay.io/coreos/
+__ansible_service_broker_etcd_image_tag: latest
+
+__ansible_service_broker_registry_type: dockerhub
+__ansible_service_broker_registry_url: null
+__ansible_service_broker_registry_user: null
+__ansible_service_broker_registry_password: null
+__ansible_service_broker_registry_organization: null
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..a6d999647
--- /dev/null
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -0,0 +1,13 @@
+---
+
+__ansible_service_broker_image_prefix: openshift3/
+__ansible_service_broker_image_tag: latest
+
+__ansible_service_broker_etcd_image_prefix: rhel7/
+__ansible_service_broker_etcd_image_tag: latest
+
+__ansible_service_broker_registry_type: rhcc
+__ansible_service_broker_registry_url: "https://registry.access.redhat.com"
+__ansible_service_broker_registry_user: null
+__ansible_service_broker_registry_password: null
+__ansible_service_broker_registry_organization: null
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index f1d948d16..a01df66b3 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -24,23 +24,30 @@
systemd:
name: etcd
state: stopped
- enabled: yes
+ enabled: no
masked: no
daemon_reload: yes
register: task_result
failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- when: "'etcd' in etcd_result.stdout"
+ when: "'etcd' not in etcd_result.stdout"
- name: Disable etcd_container
systemd:
name: etcd_container
state: stopped
enabled: no
- masked: yes
daemon_reload: yes
register: task_result
failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+- name: Remove etcd_container.service
+ file:
+ path: /etc/systemd/system/etcd_container.service
+ state: absent
+
+- name: Systemd reload configuration
+ systemd: name=etcd_container daemon_reload=yes
+
- name: Check for previous etcd data store
stat:
path: "{{ etcd_data_dir }}/member/"
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 8cc7a9c20..b5b38c1e1 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -52,7 +52,7 @@ etcd_is_containerized: False
etcd_is_thirdparty: False
# etcd dir vars
-etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if openshift.common.etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
# etcd ports and protocols
etcd_client_port: 2379
diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py
index 1e017a576..955c6313e 100644
--- a/roles/lib_openshift/library/oc_atomic_container.py
+++ b/roles/lib_openshift/library/oc_atomic_container.py
@@ -65,8 +65,11 @@ options:
# -*- -*- -*- Begin included fragment: ansible/oc_atomic_container.py -*- -*- -*-
-# pylint: disable=wrong-import-position,too-many-branches,invalid-name
+# pylint: disable=wrong-import-position,too-many-branches,invalid-name,no-name-in-module, import-error
import json
+
+from distutils.version import StrictVersion
+
from ansible.module_utils.basic import AnsibleModule
@@ -191,9 +194,15 @@ def main():
)
# Verify that the platform supports atomic command
- rc, _, err = module.run_command('atomic -v', check_rc=False)
+ rc, version_out, err = module.run_command('rpm -q --queryformat "%{VERSION}\n" atomic', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
+ # This module requires atomic version 1.17.2 or later
+ atomic_version = StrictVersion(version_out.replace('\n', ''))
+ if atomic_version < StrictVersion('1.17.2'):
+ module.fail_json(
+ msg="atomic version 1.17.2+ is required",
+ err=str(atomic_version))
try:
core(module)
diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py
index 1a5ab6869..7b81760df 100644
--- a/roles/lib_openshift/src/ansible/oc_atomic_container.py
+++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py
@@ -1,8 +1,11 @@
# pylint: skip-file
# flake8: noqa
-# pylint: disable=wrong-import-position,too-many-branches,invalid-name
+# pylint: disable=wrong-import-position,too-many-branches,invalid-name,no-name-in-module, import-error
import json
+
+from distutils.version import StrictVersion
+
from ansible.module_utils.basic import AnsibleModule
@@ -127,9 +130,15 @@ def main():
)
# Verify that the platform supports atomic command
- rc, _, err = module.run_command('atomic -v', check_rc=False)
+ rc, version_out, err = module.run_command('rpm -q --queryformat "%{VERSION}\n" atomic', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
+ # This module requires atomic version 1.17.2 or later
+ atomic_version = StrictVersion(version_out.replace('\n', ''))
+ if atomic_version < StrictVersion('1.17.2'):
+ module.fail_json(
+ msg="atomic version 1.17.2+ is required",
+ err=str(atomic_version))
try:
core(module)
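
Both copies of the module now require atomic 1.17.2 or newer and read the version with rpm rather than parsing 'atomic -v'. The same query can be run by hand to check a host before a containerized install:

    # prints the installed atomic version, e.g. 1.17.2
    rpm -q --queryformat "%{VERSION}\n" atomic
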
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index 0242f5b43..44a8fa29b 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -104,6 +104,7 @@ platforms missing the Python OpenSSL library.
self.extensions = []
PARSING_ALT_NAMES = False
+ PARSING_HEX_SERIAL = False
for line in self.cert_string.split('\n'):
l = line.strip()
if PARSING_ALT_NAMES:
@@ -114,10 +115,26 @@ platforms missing the Python OpenSSL library.
PARSING_ALT_NAMES = False
continue
+ if PARSING_HEX_SERIAL:
+ # Hex serials arrive colon-delimited
+ serial_raw = l.replace(':', '')
+ # Convert to decimal
+ self.serial = int('0x' + serial_raw, base=16)
+ PARSING_HEX_SERIAL = False
+ continue
+
# parse out the bits that we can
if l.startswith('Serial Number:'):
- # Serial Number: 11 (0xb)
- # => 11
+ # Decimal format:
+ # Serial Number: 11 (0xb)
+ # => 11
+ # Hex Format (large serials):
+ # Serial Number:
+ # 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf
+ # => 14449739080294792594019643629255165375
+ if l.endswith(':'):
+ PARSING_HEX_SERIAL = True
+ continue
self.serial = int(l.split()[-2])
elif l.startswith('Not After :'):
diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/openshift_certificate_expiry/test/conftest.py
index 4ca35ecbc..df948fff0 100644
--- a/roles/openshift_certificate_expiry/test/conftest.py
+++ b/roles/openshift_certificate_expiry/test/conftest.py
@@ -23,7 +23,10 @@ VALID_CERTIFICATE_PARAMS = [
{
'short_name': 'combined',
'cn': 'combined.example.com',
- 'serial': 6,
+ # Verify that HUGE serials parse correctly.
+ # Frobs PARSING_HEX_SERIAL in _parse_cert
+ # See https://bugzilla.redhat.com/show_bug.cgi?id=1464240
+ 'serial': 14449739080294792594019643629255165375,
'uses': b'clientAuth, serverAuth',
'dns': ['etcd'],
'ip': ['10.0.0.2', '192.168.0.2']
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml
deleted file mode 100644
index 14bdd1dca..000000000
--- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: cloudforms
-spec:
- capacity:
- storage: 2Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /opt/nfs/volumes-app
- server: 10.19.0.216
- persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml
new file mode 100644
index 000000000..250a99b8d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv01
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv01
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml
deleted file mode 100644
index 709d8d976..000000000
--- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: nfs-pv01
-spec:
- capacity:
- storage: 2Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /opt/nfs/volumes
- server: 10.19.0.216
- persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml
new file mode 100644
index 000000000..cba9bbe35
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv02
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv02
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml
new file mode 100644
index 000000000..c08c21265
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv03
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/cfme-pv03
+ server: <your-nfs-host-here>
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
index 4f25a9c8f..3bc6c5813 100644
--- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
@@ -17,6 +17,7 @@ objects:
service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
name: ${NAME}
spec:
+ clusterIP: None
ports:
- name: http
port: 80
@@ -48,11 +49,27 @@ objects:
annotations:
description: "Keeps track of changes in the CloudForms app image"
spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app
+ dockerImageRepository: "${APPLICATION_IMG_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-postgresql
+ annotations:
+ description: "Keeps track of changes in the CloudForms postgresql image"
+ spec:
+ dockerImageRepository: "${POSTGRESQL_IMG_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-memcached
+ annotations:
+ description: "Keeps track of changes in the CloudForms memcached image"
+ spec:
+ dockerImageRepository: "${MEMCACHED_IMG_NAME}"
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: ${DATABASE_SERVICE_NAME}
+ name: "${NAME}-${DATABASE_SERVICE_NAME}"
spec:
accessModes:
- ReadWriteOnce
@@ -62,45 +79,41 @@ objects:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: ${NAME}
+ name: "${NAME}-region"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
- storage: ${APPLICATION_VOLUME_CAPACITY}
-- apiVersion: v1
- kind: "DeploymentConfig"
+ storage: ${APPLICATION_REGION_VOLUME_CAPACITY}
+- apiVersion: apps/v1beta1
+ kind: "StatefulSet"
metadata:
name: ${NAME}
annotations:
description: "Defines how to deploy the CloudForms appliance"
spec:
+ serviceName: "${NAME}"
+ replicas: 1
template:
metadata:
labels:
name: ${NAME}
name: ${NAME}
spec:
- volumes:
- -
- name: "cfme-app-volume"
- persistentVolumeClaim:
- claimName: ${NAME}
containers:
- - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG}
- imagePullPolicy: IfNotPresent
- name: cloudforms
+ - name: cloudforms
+ image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}"
livenessProbe:
- httpGet:
- path: /
- port: 80
+ tcpSocket:
+ port: 443
initialDelaySeconds: 480
timeoutSeconds: 3
readinessProbe:
httpGet:
path: /
- port: 80
+ port: 443
+ scheme: HTTPS
initialDelaySeconds: 200
timeoutSeconds: 3
ports:
@@ -112,8 +125,11 @@ objects:
privileged: true
volumeMounts:
-
- name: "cfme-app-volume"
+ name: "${NAME}-server"
mountPath: "/persistent"
+ -
+ name: "${NAME}-region"
+ mountPath: "/persistent-region"
env:
-
name: "APPLICATION_INIT_DELAY"
@@ -144,29 +160,32 @@ objects:
value: "${POSTGRESQL_SHARED_BUFFERS}"
resources:
requests:
- memory: "${MEMORY_APPLICATION_MIN}"
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
lifecycle:
preStop:
exec:
command:
- /opt/rh/cfme-container-scripts/sync-pv-data
- replicas: 1
- selector:
- name: ${NAME}
- triggers:
- - type: "ConfigChange"
- - type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "cloudforms"
- from:
- kind: "ImageStreamTag"
- name: "cfme-openshift-app:${APPLICATION_IMG_TAG}"
- strategy:
- type: "Recreate"
- recreateParams:
- timeoutSeconds: 1200
+ volumes:
+ -
+ name: "${NAME}-region"
+ persistentVolumeClaim:
+ claimName: ${NAME}-region
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ # Uncomment this if using dynamic volume provisioning.
+ # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html
+ # volume.alpha.kubernetes.io/storage-class: anything
+ spec:
+ accessModes: [ ReadWriteOnce ]
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
- apiVersion: v1
kind: "Service"
metadata:
@@ -182,14 +201,6 @@ objects:
selector:
name: "${MEMCACHED_SERVICE_NAME}"
- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-memcached
- annotations:
- description: "Keeps track of changes in the CloudForms memcached image"
- spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached
-- apiVersion: v1
kind: "DeploymentConfig"
metadata:
name: "${MEMCACHED_SERVICE_NAME}"
@@ -223,7 +234,7 @@ objects:
containers:
-
name: "memcached"
- image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
ports:
-
containerPort: 11211
@@ -249,8 +260,11 @@ objects:
name: "MEMCACHED_SLAB_PAGE_SIZE"
value: "${MEMCACHED_SLAB_PAGE_SIZE}"
resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
limits:
- memory: "${MEMORY_MEMCACHED_LIMIT}"
+ memory: "${MEMCACHED_MEM_LIMIT}"
- apiVersion: v1
kind: "Service"
metadata:
@@ -266,14 +280,6 @@ objects:
selector:
name: "${DATABASE_SERVICE_NAME}"
- apiVersion: v1
- kind: ImageStream
- metadata:
- name: cfme-openshift-postgresql
- annotations:
- description: "Keeps track of changes in the CloudForms postgresql image"
- spec:
- dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql
-- apiVersion: v1
kind: "DeploymentConfig"
metadata:
name: "${DATABASE_SERVICE_NAME}"
@@ -307,11 +313,11 @@ objects:
-
name: "cfme-pgdb-volume"
persistentVolumeClaim:
- claimName: ${DATABASE_SERVICE_NAME}
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
containers:
-
name: "postgresql"
- image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
ports:
-
containerPort: 5432
@@ -350,8 +356,11 @@ objects:
name: "POSTGRESQL_SHARED_BUFFERS"
value: "${POSTGRESQL_SHARED_BUFFERS}"
resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
limits:
- memory: "${MEMORY_POSTGRESQL_LIMIT}"
+ memory: "${POSTGRESQL_MEM_LIMIT}"
parameters:
-
@@ -420,36 +429,87 @@ parameters:
name: "POSTGRESQL_SHARED_BUFFERS"
displayName: "PostgreSQL Shared Buffer Amount"
description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
- value: "64MB"
+ value: "256MB"
-
- name: "MEMORY_APPLICATION_MIN"
- displayName: "Application Memory Minimum"
+ name: "APPLICATION_CPU_REQ"
+ displayName: "Application Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Application container will need (expressed in millicores)."
+ value: "1000m"
+ -
+ name: "POSTGRESQL_CPU_REQ"
+ displayName: "PostgreSQL Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)."
+ value: "500m"
+ -
+ name: "MEMCACHED_CPU_REQ"
+ displayName: "Memcached Min CPU Requested"
+ required: true
+ description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)."
+ value: "200m"
+ -
+ name: "APPLICATION_MEM_REQ"
+ displayName: "Application Min RAM Requested"
required: true
description: "Minimum amount of memory the Application container will need."
- value: "4096Mi"
+ value: "6144Mi"
+ -
+ name: "POSTGRESQL_MEM_REQ"
+ displayName: "PostgreSQL Min RAM Requested"
+ required: true
+ description: "Minimum amount of memory the PostgreSQL container will need."
+ value: "1024Mi"
-
- name: "MEMORY_POSTGRESQL_LIMIT"
- displayName: "PostgreSQL Memory Limit"
+ name: "MEMCACHED_MEM_REQ"
+ displayName: "Memcached Min RAM Requested"
required: true
- description: "Maximum amount of memory the PostgreSQL container can use."
- value: "2048Mi"
+ description: "Minimum amount of memory the Memcached container will need."
+ value: "64Mi"
-
- name: "MEMORY_MEMCACHED_LIMIT"
- displayName: "Memcached Memory Limit"
+ name: "APPLICATION_MEM_LIMIT"
+ displayName: "Application Max RAM Limit"
required: true
- description: "Maximum amount of memory the Memcached container can use."
+ description: "Maximum amount of memory the Application container can consume."
+ value: "16384Mi"
+ -
+ name: "POSTGRESQL_MEM_LIMIT"
+ displayName: "PostgreSQL Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can consume."
+ value: "8192Mi"
+ -
+ name: "MEMCACHED_MEM_LIMIT"
+ displayName: "Memcached Max RAM Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can consume."
value: "256Mi"
-
+ name: "POSTGRESQL_IMG_NAME"
+ displayName: "PostgreSQL Image Name"
+ description: "This is the PostgreSQL image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql"
+ -
name: "POSTGRESQL_IMG_TAG"
displayName: "PostgreSQL Image Tag"
description: "This is the PostgreSQL image tag/version requested to deploy."
value: "latest"
-
+ name: "MEMCACHED_IMG_NAME"
+ displayName: "Memcached Image Name"
+ description: "This is the Memcached image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached"
+ -
name: "MEMCACHED_IMG_TAG"
displayName: "Memcached Image Tag"
description: "This is the Memcached image tag/version requested to deploy."
value: "latest"
-
+ name: "APPLICATION_IMG_NAME"
+ displayName: "Application Image Name"
+ description: "This is the Application image name requested to deploy."
+ value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app"
+ -
name: "APPLICATION_IMG_TAG"
displayName: "Application Image Tag"
description: "This is the Application image tag/version requested to deploy."
@@ -464,16 +524,22 @@ parameters:
displayName: "Application Init Delay"
required: true
description: "Delay in seconds before we attempt to initialize the application."
- value: "30"
+ value: "15"
-
name: "APPLICATION_VOLUME_CAPACITY"
displayName: "Application Volume Capacity"
required: true
description: "Volume space available for application data."
- value: "1Gi"
+ value: "5Gi"
+ -
+ name: "APPLICATION_REGION_VOLUME_CAPACITY"
+ displayName: "Application Region Volume Capacity"
+ required: true
+ description: "Volume space available for region application data."
+ value: "5Gi"
-
name: "DATABASE_VOLUME_CAPACITY"
displayName: "Database Volume Capacity"
required: true
description: "Volume space available for database."
- value: "1Gi"
+ value: "15Gi"
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml
new file mode 100644
index 000000000..240f6cbdf
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml
@@ -0,0 +1,58 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+parameters:
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+  description: Maximum amount of data used by the hawkular-services container (mostly logging)
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: CASSANDRA_DATA_LIMIT
+  description: Maximum amount of data used by the Cassandra container
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: h-services-pv
+ labels:
+ type: h-services
+ spec:
+ capacity:
+ storage: ${HAWKULAR_SERVICES_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-services
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cassandra-pv
+ labels:
+ type: cassandra
+ spec:
+ capacity:
+ storage: ${CASSANDRA_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-cassandra
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml
new file mode 100644
index 000000000..bbc0c7044
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml
@@ -0,0 +1,254 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: hawkular-services
+ annotations:
+ openshift.io/display-name: Hawkular Services
+ description: Hawkular-Services all-in-one (including Hawkular Metrics, Hawkular Alerts and Hawkular Inventory).
+ iconClass: icon-wildfly
+ tags: hawkular,hawkular-services,metrics,alerts,manageiq,cassandra
+
+parameters:
+- name: HAWKULAR_SERVICES_IMAGE
+ description: What docker image should be used for hawkular-services.
+ displayName: Hawkular Services Docker Image
+ value: registry.access.redhat.com/jboss-mm-7-tech-preview/middleware-manager:latest
+- name: CASSANDRA_IMAGE
+  description: What docker image should be used for the Cassandra node.
+ displayName: Cassandra Docker Image
+ value: registry.access.redhat.com/openshift3/metrics-cassandra:3.5.0
+- name: CASSANDRA_MEMORY_LIMIT
+ description: Maximum amount of memory for Cassandra container.
+ displayName: Cassandra Memory Limit
+ value: 2Gi
+- name: CASSANDRA_DATA_LIMIT
+  description: Maximum amount of data used by the Cassandra container.
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+  description: Maximum amount of data used by the hawkular-services container (mostly logging).
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: ROUTE_NAME
+ description: Public route with this name will be created.
+ displayName: Route Name
+ value: hawkular-services
+- name: ROUTE_HOSTNAME
+ description: Under this hostname the Hawkular Services will be accessible, if left blank a value will be defaulted.
+ displayName: Hostname
+- name: HAWKULAR_USER
+ description: Username that is used for accessing the Hawkular Services, if left blank a value will be generated.
+ displayName: Hawkular User
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+- name: HAWKULAR_PASSWORD
+ description: Password that is used for accessing the Hawkular Services, if left blank a value will be generated.
+ displayName: Hawkular Password
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+labels:
+ template: hawkular-services
+message: Credentials for hawkular-services are ${HAWKULAR_USER}:${HAWKULAR_PASSWORD}
+
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances the application pods
+ service.alpha.openshift.io/dependencies: '[{"name":"hawkular-cassandra","namespace":"","kind":"Service"}]'
+ name: hawkular-services
+ spec:
+ ports:
+ - name: http-8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: admin-9990-tcp
+ port: 9990
+ protocol: TCP
+ targetPort: 9990
+ selector:
+ name: hawkular-services
+ type: ClusterIP
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Cassandra Service
+ name: hawkular-cassandra
+ spec:
+ ports:
+ - name: cql-9042-tcp
+ port: 9042
+ protocol: TCP
+ targetPort: 9042
+ selector:
+ name: hawkular-cassandra
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${ROUTE_NAME}
+ spec:
+ host: ${ROUTE_HOSTNAME}
+ to:
+ kind: Service
+ name: hawkular-services
+ port:
+ targetPort: http-8080-tcp
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the application server
+ name: hawkular-services
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-services
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: hawkular-services
+ spec:
+ containers:
+ - image: ${HAWKULAR_SERVICES_IMAGE}
+ env:
+ - name: HAWKULAR_BACKEND
+ value: remote
+ - name: CASSANDRA_NODES
+ value: hawkular-cassandra
+ - name: HAWKULAR_USER
+ value: ${HAWKULAR_USER}
+ - name: HAWKULAR_PASSWORD
+ value: ${HAWKULAR_PASSWORD}
+ imagePullPolicy: IfNotPresent
+ name: hawkular-services
+ volumeMounts:
+ - name: h-services-data
+ mountPath: /var/opt/hawkular
+ ports:
+ - containerPort: 8080
+ - containerPort: 9990
+ livenessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 180
+ timeoutSeconds: 3
+ readinessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 120
+ timeoutSeconds: 3
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 12
+ resources:
+ requests:
+ memory: 1024Mi
+ cpu: 2000m
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ volumes:
+ - name: h-services-data
+ persistentVolumeClaim:
+ claimName: h-services-pvc
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+      description: Defines how to deploy the Cassandra node
+ name: hawkular-cassandra
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-cassandra
+ strategy:
+ type: Recreate
+ rollingParams:
+ timeoutSeconds: 300
+ template:
+ metadata:
+ labels:
+ name: hawkular-cassandra
+ spec:
+ containers:
+ - image: ${CASSANDRA_IMAGE}
+ imagePullPolicy: Always
+ name: hawkular-cassandra
+ env:
+ - name: DATA_VOLUME
+ value: /var/lib/cassandra
+ volumeMounts:
+ - name: cassandra-data
+ mountPath: /var/lib/cassandra
+ ports:
+ - containerPort: 9042
+ - containerPort: 9160
+ readinessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 300
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ resources:
+ limits:
+ memory: ${CASSANDRA_MEMORY_LIMIT}
+ volumes:
+ - name: cassandra-data
+ persistentVolumeClaim:
+ claimName: cassandra-pvc
+
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: h-services-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: cassandra-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
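
HAWKULAR_USER and HAWKULAR_PASSWORD above are declared with generate: expression and from: '[a-zA-Z0-9]{16}', so blank values are filled with a random 16-character alphanumeric string. A minimal sketch of that behaviour, covering only the character class shown here (OpenShift's expression generator is more general):

    import secrets
    import string

    def expression_value(length=16, alphabet=string.ascii_letters + string.digits):
        # draw `length` characters from [a-zA-Z0-9], mirroring from: '[a-zA-Z0-9]{16}'
        return "".join(secrets.choice(alphabet) for _ in range(length))

    print(expression_value())  # used when HAWKULAR_USER / HAWKULAR_PASSWORD are left blank
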
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
index f347f1f9f..40f8b7933 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root-password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mariadb\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
index 6ed744777..3d8f592cb 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root-password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mariadb\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
index 97a8abf6d..894cba750 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
@@ -24,7 +24,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-admin-password": "{.data['database-admin-password']}"
+ }
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
@@ -37,7 +42,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
index 0656219fb..d5c25a5bb 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
@@ -24,7 +24,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-admin-password": "{.data['database-admin-password']}"
+ }
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
@@ -37,7 +42,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
index d60b4647d..10f3bb09e 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root-password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -36,7 +41,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
index c2bfa40fd..2fd82093a 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
@@ -23,7 +23,12 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-root-password": "{.data['database-root-password']}"
+ }
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
@@ -35,7 +40,10 @@
"kind": "Service",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
index 7a16e742a..c37102cb0 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
@@ -24,7 +24,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
@@ -36,7 +40,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
index 242212d6f..32dc93a95 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
@@ -24,7 +24,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['database-user']}",
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
@@ -36,7 +40,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
index e9af50937..6bb683e52 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
@@ -24,7 +24,10 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-password" : "${REDIS_PASSWORD}"
@@ -35,7 +38,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
index aa27578a9..9e8be2309 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
@@ -24,7 +24,10 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.openshift.io/expose-password": "{.data['database-password']}"
+ }
},
"stringData" : {
"database-password" : "${REDIS_PASSWORD}"
@@ -35,7 +38,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
index 2583018b7..6cef21945 100644
--- a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
@@ -7,6 +7,51 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "httpd",
+ "annotations": {
+ "openshift.io/display-name": "Httpd"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Httpd (Latest)",
+ "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.4"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "Httpd 2.4",
+ "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "version": "2.4",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/httpd-24-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "ruby",
"annotations": {
"openshift.io/display-name": "Ruby"
diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
index b65f0a5e3..abdae01e3 100644
--- a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
@@ -7,6 +7,51 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "httpd",
+ "annotations": {
+ "openshift.io/display-name": "Httpd"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Httpd (Latest)",
+ "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.4"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "Httpd 2.4",
+ "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
+ "iconClass": "icon-apache",
+ "tags": "builder,httpd",
+ "supports":"httpd",
+ "version": "2.4",
+ "sampleRepo": "https://github.com/openshift/httpd-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/httpd-24-rhel7"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "ruby",
"annotations": {
"openshift.io/display-name": "Ruby"
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md
index f48d8d4a8..6d2ccbf7f 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md
@@ -17,6 +17,7 @@ instantiating them.
* [Dancer persistent](https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql-persistent.json) - Provides a basic Dancer (Perl) application with a persistent MySQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/dancer-ex).
* [Django](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json) - Provides a basic Django (Python) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/django-ex).
* [Django persistent](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql-persistent.json) - Provides a basic Django (Python) application with a persistent PostgreSQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/django-ex).
+* [Httpd](https://raw.githubusercontent.com/openshift/httpd-ex/master/openshift/templates/httpd.json) - Provides a basic Httpd static content application. For more information see the [source repository](https://github.com/openshift/httpd-ex).
* [NodeJS](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json) - Provides a basic NodeJS application with a MongoDB database. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
* [NodeJS persistent](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb-persistent.json) - Provides a basic NodeJS application with a persistent MongoDB database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
* [Rails](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json) - Provides a basic Rails (Ruby) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/rails-ex).
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
index eb3d296be..8c79d3340 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
@@ -60,7 +60,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
index da2454d2e..0f75f773f 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
@@ -60,7 +60,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
index ec335daa0..f564d4606 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
index 6304586dd..48283bfc2 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
index 152bf1c7c..180eeb967 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
index f3b5f97f3..da79c8dd0 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json
new file mode 100644
index 000000000..5bfb4b019
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json
@@ -0,0 +1,274 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "httpd-example",
+ "annotations": {
+ "openshift.io/display-name": "Httpd",
+ "description": "An example Httpd application that serves static content. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
+ "tags": "quickstart,httpd",
+ "iconClass": "icon-apache",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a static application served by httpd, including a build configuration and application deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/httpd-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
+ "labels": {
+ "template": "httpd-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "httpd:2.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "httpd-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "httpd-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "env": [
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "httpd-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/httpd-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the httpd service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
index 264e4b2de..ce96684a9 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
@@ -22,7 +22,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"to": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
index b47bdf353..34b2b920b 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
@@ -22,7 +22,10 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "creationTimestamp": null,
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"to": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
index c570ca5d5..167370811 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -102,7 +105,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "nodejs:4"
+ "name": "nodejs:6"
},
"env": [
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
index 161f1582e..214c110d2 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
@@ -58,7 +58,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
@@ -102,7 +105,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "nodejs:4"
+ "name": "nodejs:6"
},
"env": [
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
index b400cfdb3..82a979379 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
@@ -23,7 +23,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['application-user']}",
+ "template.openshift.io/expose-password": "{.data['application-password']}"
+ }
},
"stringData" : {
"database-user" : "${DATABASE_USER}",
@@ -60,7 +64,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
index fa67412ff..f32c4fc4a 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
@@ -23,7 +23,11 @@
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-username": "{.data['application-user']}",
+ "template.openshift.io/expose-password": "{.data['application-password']}"
+ }
},
"stringData" : {
"database-user" : "${DATABASE_USER}",
@@ -60,7 +64,10 @@
"kind": "Route",
"apiVersion": "v1",
"metadata": {
- "name": "${NAME}"
+ "name": "${NAME}",
+ "annotations": {
+ "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
+ }
},
"spec": {
"host": "${APPLICATION_DOMAIN}",
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index 28b388560..cc4dc9365 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -1,2 +1,2 @@
---
-use_system_containers: false
+openshift_use_system_containers: false
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index cfe092a28..663423061 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -537,6 +537,7 @@ def set_node_schedulability(facts):
return facts
+# pylint: disable=too-many-branches
def set_selectors(facts):
""" Set selectors facts if not already present in facts dict
Args:
@@ -570,6 +571,10 @@ def set_selectors(facts):
facts['hosted']['logging'] = {}
if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
facts['hosted']['logging']['selector'] = None
+ if 'etcd' not in facts['hosted']:
+ facts['hosted']['etcd'] = {}
+ if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
+ facts['hosted']['etcd']['selector'] = None
return facts
@@ -1654,6 +1659,7 @@ def set_proxy_facts(facts):
common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
# We always add local dns domain and ourselves no matter what
common['no_proxy'].append('.' + common['dns_domain'])
+ common['no_proxy'].append('.svc')
common['no_proxy'].append(common['hostname'])
common['no_proxy'] = ','.join(sort_unique(common['no_proxy']))
facts['common'] = common
@@ -2156,6 +2162,25 @@ class OpenShiftFacts(object):
create_pvc=False
)
),
+ etcd=dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='etcd',
+ size='1Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ ),
registry=dict(
storage=dict(
kind=None,
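
The proxy hunk adds the cluster-internal .svc suffix to no_proxy alongside the local DNS domain and the host itself. A minimal sketch of that assembly, assuming sort_unique deduplicates and sorts as its name suggests:

    def sort_unique(items):
        # stand-in for the helper used by openshift_facts.py
        return sorted(set(items))

    def build_no_proxy(existing, dns_domain, hostname):
        no_proxy = list(existing)
        no_proxy.append('.' + dns_domain)  # local DNS domain, always added
        no_proxy.append('.svc')            # new: keep intra-cluster service names off the proxy
        no_proxy.append(hostname)
        return ','.join(sort_unique(no_proxy))

    print(build_no_proxy(['registry.example.com'], 'cluster.local', 'master-0.example.com'))
    # .cluster.local,.svc,master-0.example.com,registry.example.com
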
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 50ed3e964..451386bf1 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -9,10 +9,10 @@
l_is_atomic: "{{ ostree_booted.stat.exists }}"
- set_fact:
l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
- l_is_openvswitch_system_container: "{{ (use_openvswitch_system_container | default(use_system_containers) | bool) }}"
- l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}"
- l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}"
- l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
+ l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers) | bool) }}"
+ l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers) | bool) }}"
+ l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers) | bool) }}"
+ l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers) | bool) }}"
- set_fact:
l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
- set_fact:
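
The renamed facts keep the same fallback chain: an explicit openshift_use_<component>_system_container setting wins, otherwise the global openshift_use_system_containers value applies. A rough Python rendering of that default() chain (Ansible's default() actually triggers on undefined variables; None stands in for "unset" here):

    def system_container_enabled(component_setting=None, global_setting=False):
        # mirrors openshift_use_X_system_container | default(openshift_use_system_containers) | bool
        value = component_setting if component_setting is not None else global_setting
        return bool(value)

    print(system_container_enabled(None, True))   # no per-component override -> global toggle wins
    print(system_container_enabled(False, True))  # explicit per-component override -> False
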
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 60aacf715..26bf4c09b 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -169,7 +169,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
registries = [registry]
for registry in registries:
- args = {"_raw_params": "skopeo inspect docker://{}/{}".format(registry, image)}
+ args = {"_raw_params": "skopeo inspect --tls-verify=false docker://{}/{}".format(registry, image)}
result = self.execute_module("command", args, task_vars=task_vars)
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
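
With --tls-verify=false the availability check can also inspect registries that serve self-signed certificates or plain HTTP. A sketch of the command string being assembled; the real check runs it through Ansible's command module via execute_module, and the registry and image below are placeholders:

    def skopeo_inspect_cmd(registry, image, tls_verify=False):
        cmd = "skopeo inspect"
        if not tls_verify:
            cmd += " --tls-verify=false"
        return "{} docker://{}/{}".format(cmd, registry, image)

    # hypothetical registry and image, for illustration only
    print(skopeo_inspect_cmd("registry.example.com:5000", "openshift3/ose-pod:v3.6.0"))
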
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 66d880d23..9b7767ccd 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -89,7 +89,7 @@ openshift_logging_es_cpu_limit: null
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default(null) }}"
+openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}"
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 684dbe0a0..d9ac52cb7 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -194,7 +194,9 @@
- port: 9200
targetPort: "restapi"
-- name: Creating ES storage template
+# Storage classes are used by default; for static storage they are disabled
+# by setting storageClassName to "" in pvc.j2
+- name: Creating ES storage template - static
template:
src: pvc.j2
dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
@@ -203,11 +205,13 @@
size: "{{ openshift_logging_elasticsearch_pvc_size }}"
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
when:
- openshift_logging_elasticsearch_storage_type == "pvc"
- not openshift_logging_elasticsearch_pvc_dynamic
-- name: Creating ES storage template
+# Storageclasses are used by default if configured
+- name: Creating ES storage template - dynamic
template:
src: pvc.j2
dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
@@ -216,8 +220,6 @@
size: "{{ openshift_logging_elasticsearch_pvc_size }}"
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- annotations:
- volume.beta.kubernetes.io/storage-class: "dynamic"
when:
- openshift_logging_elasticsearch_storage_type == "pvc"
- openshift_logging_elasticsearch_pvc_dynamic
diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
index 409e564c2..141967c33 100644
--- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
@@ -14,6 +14,7 @@ index:
flush_threshold_period: 5m
node:
+ name: ${DC_NAME}
master: ${IS_MASTER}
data: ${HAS_DATA}
max_local_storage_nodes: 1
@@ -61,7 +62,7 @@ path:
searchguard:
authcz.admin_dn:
- CN=system.admin,OU=OpenShift,O=Logging
- config_index_name: ".searchguard.${HOSTNAME}"
+ config_index_name: ".searchguard.${DC_NAME}"
ssl:
transport:
enabled: true
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index bd2289f0d..844dbc8c2 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -58,6 +58,9 @@ spec:
name: "cluster"
env:
-
+ name: "DC_NAME"
+ value: "{{deploy_name}}"
+ -
name: "NAMESPACE"
valueFrom:
fieldRef:
diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2
index f19a3a750..063f9c5ae 100644
--- a/roles/openshift_logging_elasticsearch/templates/pvc.j2
+++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2
@@ -25,3 +25,6 @@ spec:
resources:
requests:
storage: {{size}}
+{% if storage_class_name is defined %}
+ storageClassName: {{ storage_class_name }}
+{% endif %}
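Taken together, the task and template changes above mean the static-provisioning branch passes an explicit storage_class_name, which per the comment above is intended to render storageClassName as "" and so opt the claim out of the cluster's default storage class, while the dynamic branch drops the old volume.beta.kubernetes.io/storage-class annotation and relies on the default class. A rough sketch of the PVC the static branch aims to produce, with an assumed name, size, and access mode:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: logging-es-0            # name and ordinal are illustrative
    spec:
      accessModes:
      - ReadWriteOnce               # from openshift_logging_elasticsearch_pvc_access_modes, assumed
      resources:
        requests:
          storage: 10Gi             # from openshift_logging_elasticsearch_pvc_size, assumed
      storageClassName: ""          # disables dynamic provisioning for this claim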
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index e5362105c..fbf69c270 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -15,18 +15,19 @@ Role Variables
From this role:
-| Name | Default value | |
-|-------------------------------------|-----------------------|-------------------------------------------------------------------------------|
-| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
-| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up |
-| oreg_url | UNDEF | Default docker registry to use |
-| oreg_url_master | UNDEF | Default docker registry to use, specifically on the master |
-| openshift_master_api_port | UNDEF | |
-| openshift_master_console_port | UNDEF | |
-| openshift_master_api_url | UNDEF | |
-| openshift_master_console_url | UNDEF | |
-| openshift_master_public_api_url | UNDEF | |
-| openshift_master_public_console_url | UNDEF | |
+| Name | Default value | |
+|---------------------------------------------------|-----------------------|-------------------------------------------------------------------------------|
+| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
+| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up |
+| oreg_url | UNDEF | Default docker registry to use |
+| oreg_url_master | UNDEF | Default docker registry to use, specifically on the master |
+| openshift_master_api_port | UNDEF | |
+| openshift_master_console_port | UNDEF | |
+| openshift_master_api_url | UNDEF | |
+| openshift_master_console_url | UNDEF | |
+| openshift_master_public_api_url | UNDEF | |
+| openshift_master_public_console_url | UNDEF | |
+| openshift_master_saconfig_limit_secret_references | false | |
From openshift_common:
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 6a082d71a..2d3ce5bcd 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,3 +1,4 @@
---
openshift_node_ips: []
r_openshift_master_clean_install: false
+r_openshift_master_etcd3_storage: false
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 035c15fef..86532cd0a 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -128,6 +128,9 @@
when: openshift.master.request_header_ca is defined and item.kind == 'RequestHeaderIdentityProvider' and item.clientCA | default('') != ''
with_items: "{{ openshift.master.identity_providers }}"
+- set_fact:
+ openshift_push_via_dns: "{{ openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6 and r_openshift_master_clean_install }}"
+
- name: Install the systemd units
include: systemd_units.yml
@@ -164,26 +167,6 @@
- restart master api
- restart master controllers
-- name: Configure master to use etcd3 storage backend on 3.6 clean installs
- yedit:
- src: /etc/origin/master/master-config.yaml
- key: "{{ item.key }}"
- value: "{{ item.value }}"
- with_items:
- - key: kubernetesMasterConfig.apiServerArguments.storage-backend
- value:
- - etcd3
- - key: kubernetesMasterConfig.apiServerArguments.storage-media-type
- value:
- - application/vnd.kubernetes.protobuf
- when:
- - r_openshift_master_clean_install
- - openshift.common.version_gte_3_6
- notify:
- - restart master
- - restart master api
- - restart master controllers
-
- include: set_loopback_context.yml
when: openshift.common.version_gte_3_2_or_1_2
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 6e2439fd9..850fae0e4 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -1,5 +1,8 @@
OPTIONS=--loglevel={{ openshift.master.debug_level | default(2) }}
CONFIG_FILE={{ openshift_master_config_file }}
+{% if openshift_push_via_dns | default(false) %}
+OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
+{% endif %}
{% if openshift.common.is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
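When openshift_push_via_dns evaluates true (the set_fact added earlier: dnsmasq enabled, version >= 3.6, clean install), the sysconfig templates export OPENSHIFT_DEFAULT_REGISTRY pointing at the registry's cluster DNS name. A hedged Ansible sketch of an equivalent manual edit, assuming the 'origin' service type and the master-api sysconfig path:

    - name: Export the default registry for push-via-DNS (sketch)
      lineinfile:
        path: /etc/sysconfig/origin-master-api    # assumes service_type 'origin'
        regexp: '^OPENSHIFT_DEFAULT_REGISTRY='
        line: OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000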
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 897ee7285..e8f7c47b0 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,7 +12,7 @@ Requires={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 451f3436a..69db62f16 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,7 +11,7 @@ PartOf={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 1935d9592..af3ebc6d2 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -139,6 +139,12 @@ kubernetesMasterConfig:
- v1
{% endif %}
apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
+{% if r_openshift_master_etcd3_storage or ( r_openshift_master_clean_install and openshift.common.version_gte_3_6 ) %}
+ storage-backend:
+ - etcd3
+ storage-media-type:
+ - application/vnd.kubernetes.protobuf
+{% endif %}
controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
masterIP: {{ openshift.common.ip }}
@@ -229,7 +235,7 @@ projectConfig:
routingConfig:
subdomain: "{{ openshift_master_default_subdomain | default("") }}"
serviceAccountConfig:
- limitSecretReferences: false
+ limitSecretReferences: {{ openshift_master_saconfig_limitsecretreferences | default(false) }}
managedNames:
- default
- builder
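With the yedit task removed from openshift_master/tasks/main.yml, the etcd3 settings are rendered directly into the config template whenever r_openshift_master_etcd3_storage is set or the host is a clean 3.6+ install, and limitSecretReferences becomes inventory-driven. A sketch of the resulting master-config.yaml fragment (other apiServerArguments omitted):

    kubernetesMasterConfig:
      apiServerArguments:
        storage-backend:
        - etcd3
        storage-media-type:
        - application/vnd.kubernetes.protobuf
    serviceAccountConfig:
      limitSecretReferences: false    # now set from openshift_master_saconfig_limitsecretreferences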
diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2
index 7f40cb042..31c1dfc33 100644
--- a/roles/openshift_master/templates/master_docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2
@@ -8,7 +8,7 @@ Wants=etcd_container.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
Restart=always
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index c484d23cc..c05a27559 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -1,5 +1,8 @@
OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
CONFIG_FILE={{ openshift_master_config_file }}
+{% if openshift_push_via_dns | default(false) %}
+OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
+{% endif %}
{% if openshift.common.is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index e0adbbf52..a153fb33d 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,5 +1,8 @@
OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
CONFIG_FILE={{ openshift_master_config_file }}
+{% if openshift_push_via_dns | default(false) %}
+OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
+{% endif %}
{% if openshift.common.is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index ba50566e9..c34936930 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -16,7 +16,7 @@ openshift_metrics_hawkular_nodeselector: ""
openshift_metrics_cassandra_replicas: 1
openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}"
openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}"
-openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default(null) }}"
+openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default('') }}"
openshift_metrics_cassandra_limits_memory: 2G
openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 62b7f52cb..7928a0346 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -36,6 +36,7 @@
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+ storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when:
- openshift_metrics_cassandra_storage_type != 'emptydir'
@@ -50,8 +51,6 @@
obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}"
labels:
metrics-infra: hawkular-cassandra
- annotations:
- volume.beta.kubernetes.io/storage-class: dynamic
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
index 0b801b33f..b4e6a1503 100644
--- a/roles/openshift_metrics/templates/pvc.j2
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -32,3 +32,6 @@ spec:
resources:
requests:
storage: {{size}}
+{% if storage_class_name is defined %}
+ storageClassName: {{ storage_class_name }}
+{% endif %}
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index d89b64b06..cd0a1a60b 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -17,7 +17,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index 24798d3d2..c68073a10 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -96,6 +96,9 @@ EOF
if ! grep -q '99-origin-dns.sh' ${NEW_RESOLV_CONF}; then
echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> ${NEW_RESOLV_CONF}
fi
+ if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
+ sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF}
+ fi
cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
fi
fi
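The dispatcher-script change above appends cluster.local to the resolv.conf search line when it is missing, so a name such as docker-registry.default.svc can fall back to docker-registry.default.svc.cluster.local. A rough Ansible equivalent of the same edit, offered only as a sketch, not part of the role:

    - name: Ensure cluster.local is on the resolv.conf search line (sketch)
      replace:
        path: /etc/resolv.conf
        regexp: '^(search(?!.*\bcluster\.local\b).*)$'
        replace: '\1 cluster.local'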
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index d44839d69..8eaa68cc9 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -147,3 +147,6 @@
# Give the node two minutes to come back online.
retries: 24
delay: 5
+
+- include_role:
+ name: openshift_node_dnsmasq
diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml
new file mode 100644
index 000000000..01ee2544d
--- /dev/null
+++ b/roles/openshift_service_catalog/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_service_catalog_remove: false
+openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"}
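Both defaults are meant to be overridden from the inventory: the first drives the install/remove switch in tasks/main.yml, and the node selector decides where the apiserver and controller-manager daemonsets are scheduled. A sketch of overriding them in group_vars; the selector value here is illustrative, not the shipped default:

    openshift_service_catalog_remove: false
    openshift_service_catalog_nodeselector: {"region": "infra"}   # example selector only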
diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
new file mode 100644
index 000000000..880146ca4
--- /dev/null
+++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
@@ -0,0 +1,161 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: service-catalog
+objects:
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: servicecatalog-serviceclass-viewer
+ rules:
+ - apiGroups:
+ - servicecatalog.k8s.io
+ resources:
+ - serviceclasses
+ verbs:
+ - list
+ - watch
+ - get
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: servicecatalog-serviceclass-viewer-binding
+ roleRef:
+ name: servicecatalog-serviceclass-viewer
+ groupNames:
+ - system:authenticated
+
+- kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller
+
+- kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: service-catalog-apiserver
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: sar-creator
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - subjectaccessreviews.authorization.k8s.io
+ verbs:
+ - create
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-sar-creator-binding
+ roleRef:
+ name: sar-creator
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: namespace-viewer
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - list
+ - watch
+ - get
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-namespace-viewer-binding
+ roleRef:
+ name: namespace-viewer
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller-namespace-viewer-binding
+ roleRef:
+ name: namespace-viewer
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ - podpresets
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - servicecatalog.k8s.io
+ resources:
+ - brokers/status
+ - instances/status
+ - bindings/status
+ verbs:
+ - update
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller-binding
+ roleRef:
+ name: service-catalog-controller
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: Role
+ apiVersion: v1
+ metadata:
+ name: endpoint-accessor
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - list
+ - watch
+ - get
+ - create
+ - update
+
+- kind: RoleBinding
+ apiVersion: v1
+ metadata:
+ name: endpoint-accessor-binding
+ roleRef:
+ name: endpoint-accessor
+ namespace: kube-service-catalog
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: system:auth-delegator-binding
+ roleRef:
+ name: system:auth-delegator
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
diff --git a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
new file mode 100644
index 000000000..f6ee0955d
--- /dev/null
+++ b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: kube-system-service-catalog
+objects:
+
+- kind: Role
+ apiVersion: v1
+ metadata:
+ name: extension-apiserver-authentication-reader
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ rules:
+ - apiGroups:
+ - ""
+ resourceNames:
+ - extension-apiserver-authentication
+ resources:
+ - configmaps
+ verbs:
+ - get
+
+- kind: RoleBinding
+ apiVersion: v1
+ metadata:
+ name: extension-apiserver-authentication-reader-binding
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ roleRef:
+ name: extension-apiserver-authentication-reader
+ namespace: kube-system
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+parameters:
+- description: Do not change this value.
+ displayName: Name of the kube-system namespace
+ name: KUBE_SYSTEM_NAMESPACE
+ required: true
+ value: kube-system
diff --git a/roles/openshift_service_catalog/meta/main.yml b/roles/openshift_service_catalog/meta/main.yml
new file mode 100644
index 000000000..1e6b837cd
--- /dev/null
+++ b/roles/openshift_service_catalog/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Service Catalog
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
+- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
new file mode 100644
index 000000000..cc897b032
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -0,0 +1,70 @@
+---
+- name: Create service catalog cert directory
+ file:
+ path: "{{ openshift.common.config_base }}/service-catalog"
+ state: directory
+ mode: 0755
+ changed_when: False
+ check_mode: no
+
+- set_fact:
+ generated_certs_dir: "{{ openshift.common.config_base }}/service-catalog"
+
+- name: Generate signing cert
+ command: >
+ {{ openshift.common.client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert
+ --key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt
+ --serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer
+
+- name: Generating server keys
+ oc_adm_ca_server_cert:
+ cert: "{{ generated_certs_dir }}/apiserver.crt"
+ key: "{{ generated_certs_dir }}/apiserver.key"
+ hostnames: "apiserver.kube-service-catalog.svc,apiserver.kube-service-catalog.svc.cluster.local,apiserver.kube-service-catalog"
+ signer_cert: "{{ generated_certs_dir }}/ca.crt"
+ signer_key: "{{ generated_certs_dir }}/ca.key"
+ signer_serial: "{{ generated_certs_dir }}/apiserver.serial.txt"
+
+- name: Create apiserver-ssl secret
+ oc_secret:
+ state: present
+ name: apiserver-ssl
+ namespace: kube-service-catalog
+ files:
+ - name: tls.crt
+ path: "{{ generated_certs_dir }}/apiserver.crt"
+ - name: tls.key
+ path: "{{ generated_certs_dir }}/apiserver.key"
+
+- slurp:
+ src: "{{ generated_certs_dir }}/ca.crt"
+ register: apiserver_ca
+
+- shell: >
+ oc get apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+ register: get_apiservices
+ changed_when: no
+
+- name: Create api service
+ oc_obj:
+ state: present
+ name: v1alpha1.servicecatalog.k8s.io
+ kind: apiservices.apiregistration.k8s.io
+ namespace: "kube-service-catalog"
+ content:
+ path: /tmp/apisvcout
+ data:
+ apiVersion: apiregistration.k8s.io/v1beta1
+ kind: APIService
+ metadata:
+ name: v1alpha1.servicecatalog.k8s.io
+ spec:
+ group: servicecatalog.k8s.io
+ version: v1alpha1
+ service:
+ namespace: "kube-service-catalog"
+ name: apiserver
+ caBundle: "{{ apiserver_ca.content }}"
+ groupPriorityMinimum: 20
+ versionPriority: 10
+ when: "'not found' in get_apiservices.stdout"
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
new file mode 100644
index 000000000..c1773b5f6
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -0,0 +1,181 @@
+---
+# do any asserts here
+
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+
+- include: wire_aggregator.yml
+
+- name: Set default image variables based on deployment_type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set service_catalog image facts
+ set_fact:
+ openshift_service_catalog_image_prefix: "{{ openshift_service_catalog_image_prefix | default(__openshift_service_catalog_image_prefix) }}"
+ openshift_service_catalog_image_version: "{{ openshift_service_catalog_image_version | default(__openshift_service_catalog_image_version) }}"
+
+- name: Set Service Catalog namespace
+ oc_project:
+ state: present
+ name: "kube-service-catalog"
+# node_selector: "{{ openshift_service_catalog_nodeselector | default(null) }}"
+
+- include: generate_certs.yml
+
+- copy:
+ src: kubeservicecatalog_roles_bindings.yml
+ dest: "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
+
+- oc_obj:
+ name: service-catalog
+ kind: template
+ namespace: "kube-service-catalog"
+ files:
+ - "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
+ delete_after: yes
+
+- oc_process:
+ create: True
+ template_name: service-catalog
+ namespace: "kube-service-catalog"
+
+- copy:
+ src: kubesystem_roles_bindings.yml
+ dest: "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
+
+- oc_obj:
+ name: kube-system-service-catalog
+ kind: template
+ namespace: kube-system
+ files:
+ - "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
+ delete_after: yes
+
+- oc_process:
+ create: True
+ template_name: kube-system-service-catalog
+ namespace: kube-system
+
+- shell: >
+ oc get policybindings/kube-system:default -n kube-system || echo "not found"
+ register: get_kube_system
+ changed_when: no
+
+- command: >
+ oc create policybinding kube-system -n kube-system
+ when: "'not found' in get_kube_system.stdout"
+
+- oc_adm_policy_user:
+ namespace: kube-service-catalog
+ resource_kind: scc
+ resource_name: hostmount-anyuid
+ state: present
+ user: "system:serviceaccount:kube-service-catalog:service-catalog-apiserver"
+
+- name: Set SA cluster-role
+ oc_adm_policy_user:
+ state: present
+ namespace: "kube-service-catalog"
+ resource_kind: cluster-role
+ resource_name: admin
+ user: "system:serviceaccount:kube-service-catalog:default"
+
+## api server
+- template:
+ src: api_server.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_server.yml"
+ vars:
+ image: ""
+ namespace: ""
+ cpu_limit: none
+ memory_limit: none
+ cpu_requests: none
+ memory_request: none
+ cors_allowed_origin: localhost
+ node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}"
+
+- name: Set Service Catalog API Server daemonset
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_server.yml"
+ delete_after: yes
+
+- template:
+ src: api_server_service.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_service.yml"
+
+- name: Set Service Catalog API Server service
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: service
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_service.yml"
+ delete_after: yes
+
+- template:
+ src: api_server_route.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_route.yml"
+
+- name: Set Service Catalog API Server route
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: route
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_route.yml"
+ delete_after: yes
+
+## controller manager
+- template:
+ src: controller_manager.j2
+ dest: "{{ mktemp.stdout }}/controller_manager.yml"
+ vars:
+ image: ""
+ cpu_limit: none
+ memory_limit: none
+ node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}"
+
+- name: Set Controller Manager deployment
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: controller-manager
+ files:
+ - "{{ mktemp.stdout }}/controller_manager.yml"
+ delete_after: yes
+
+- template:
+ src: controller_manager_service.j2
+ dest: "{{ mktemp.stdout }}/controller_manager_service.yml"
+
+- name: Set Controller Manager service
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: service
+ name: controller-manager
+ files:
+ - "{{ mktemp.stdout }}/controller_manager_service.yml"
+ delete_after: yes
+
+- include: start_api_server.yml
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_service_catalog/tasks/main.yml b/roles/openshift_service_catalog/tasks/main.yml
new file mode 100644
index 000000000..dc0d6a370
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include: install.yml
+ when: not openshift_service_catalog_remove | default(false) | bool
+
+- include: remove.yml
+ when: openshift_service_catalog_remove | default(false) | bool
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
new file mode 100644
index 000000000..2fb1ec440
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -0,0 +1,56 @@
+---
+- name: Remove Service Catalog APIServer
+ command: >
+ oc delete apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
+
+- name: Remove Policy Binding
+ command: >
+ oc delete policybindings/kube-system:default -n kube-system --ignore-not-found
+
+# TODO: this module doesn't currently remove this
+#- name: Remove service catalog api service
+# oc_obj:
+# state: absent
+# namespace: "kube-service-catalog"
+# kind: apiservices.apiregistration.k8s.io
+# name: v1alpha1.servicecatalog.k8s.io
+
+- name: Remove Service Catalog API Server route
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: route
+ name: apiserver
+
+- name: Remove Service Catalog API Server service
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: service
+ name: apiserver
+
+- name: Remove Service Catalog API Server daemonset
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: apiserver
+
+- name: Remove Controller Manager service
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: service
+ name: controller-manager
+
+- name: Remove Controller Manager deployment
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: deployment
+ name: controller-manager
+
+- name: Remove Service Catalog namespace
+ oc_project:
+ state: absent
+ name: "kube-service-catalog"
diff --git a/roles/openshift_service_catalog/tasks/start_api_server.yml b/roles/openshift_service_catalog/tasks/start_api_server.yml
new file mode 100644
index 000000000..b143292b6
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/start_api_server.yml
@@ -0,0 +1,22 @@
+---
+# Label nodes and wait for apiserver and controller to be running (at least one)
+- name: Label {{ openshift.node.nodename }} for APIServer and controller deployment
+ oc_label:
+ name: "{{ openshift.node.nodename }}"
+ kind: node
+ state: add
+ labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | oo_dict_to_list_of_dict }}"
+
+# wait to see that the apiserver is available
+- name: wait for api server to be ready
+ command: >
+ curl -k https://apiserver.kube-service-catalog.svc/healthz
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_health
+ until: api_health.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
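The curl loop above polls the aggregated API server's /healthz endpoint for up to two minutes (120 retries at 1 second). Where python-httplib2 is available, a uri-module variant avoids shelling out; a sketch, with certificate validation disabled to mirror the -k flag:

    - name: Wait for the service-catalog API server to report healthy (sketch)
      uri:
        url: https://apiserver.kube-service-catalog.svc/healthz
        validate_certs: no        # mirrors curl -k; trusting the signer CA would be stricter
        return_content: yes
      register: api_health
      until: api_health.content == 'ok'
      retries: 120
      delay: 1
      changed_when: false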
diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
new file mode 100644
index 000000000..3e5897ba4
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
@@ -0,0 +1,86 @@
+---
+# TODO: this currently has a bug where hostnames are required
+- name: Creating Aggregator signer certs
+ command: >
+ oc adm ca create-signer-cert
+ --cert=/etc/origin/master/front-proxy-ca.crt
+ --key=/etc/origin/master/front-proxy-ca.key
+ --serial=/etc/origin/master/ca.serial.txt
+# oc_adm_ca_server_cert:
+# cert: /etc/origin/master/front-proxy-ca.crt
+# key: /etc/origin/master/front-proxy-ca.key
+
+- name: Create api-client config for Aggregator
+ command: >
+ oc adm create-api-client-config
+ --certificate-authority=/etc/origin/master/front-proxy-ca.crt
+ --signer-cert=/etc/origin/master/front-proxy-ca.crt
+ --signer-key=/etc/origin/master/front-proxy-ca.key
+ --user aggregator-front-proxy
+ --client-dir=/etc/origin/master
+ --signer-serial=/etc/origin/master/ca.serial.txt
+
+- name: Update master config
+ yedit:
+ state: present
+ src: /etc/origin/master/master-config.yaml
+ edits:
+ - key: aggregatorConfig.proxyClientInfo.certFile
+ value: aggregator-front-proxy.crt
+ - key: aggregatorConfig.proxyClientInfo.keyFile
+ value: aggregator-front-proxy.key
+ - key: authConfig.requestHeader.clientCA
+ value: front-proxy-ca.crt
+ - key: authConfig.requestHeader.clientCommonNames
+ value: [aggregator-front-proxy]
+ - key: authConfig.requestHeader.usernameHeaders
+ value: [X-Remote-User]
+ - key: authConfig.requestHeader.groupHeaders
+ value: [X-Remote-Group]
+ - key: authConfig.requestHeader.extraHeaderPrefixes
+ value: [X-Remote-Extra-]
+ register: yedit_output
+
+#restart master serially here
+- name: restart master
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is not defined or not openshift.master.ha | bool
+
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is defined and openshift.master.ha | bool
+ - openshift.master.cluster_method == 'native'
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is defined and openshift.master.ha | bool
+ - openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+ when:
+ - yedit_output.changed
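After the yedit edits above, the master config carries the front-proxy client and request-header settings the aggregator needs, and the restart handlers only fire when yedit reports a change. A sketch of the resulting master-config.yaml fragment:

    aggregatorConfig:
      proxyClientInfo:
        certFile: aggregator-front-proxy.crt
        keyFile: aggregator-front-proxy.key
    authConfig:
      requestHeader:
        clientCA: front-proxy-ca.crt
        clientCommonNames:
        - aggregator-front-proxy
        usernameHeaders:
        - X-Remote-User
        groupHeaders:
        - X-Remote-Group
        extraHeaderPrefixes:
        - X-Remote-Extra-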
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
new file mode 100644
index 000000000..8ae6b6c8d
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -0,0 +1,80 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ labels:
+ app: apiserver
+ name: apiserver
+spec:
+ selector:
+ matchLabels:
+ app: apiserver
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: apiserver
+ spec:
+ serviceAccountName: service-catalog-apiserver
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+ containers:
+ - args:
+ - --storage-type
+ - etcd
+ - --secure-port
+ - "6443"
+ - --etcd-servers
+# TODO: come back and get openshift.common.hostname to work
+ - https://{{ openshift.common.ip }}:{{ openshift.master.etcd_port }}
+ - --etcd-cafile
+ - /etc/origin/master/master.etcd-ca.crt
+ - --etcd-certfile
+ - /etc/origin/master/master.etcd-client.crt
+ - --etcd-keyfile
+ - /etc/origin/master/master.etcd-client.key
+ - -v
+ - "10"
+ - --cors-allowed-origins
+ - {{ cors_allowed_origin }}
+ - --admission-control
+ - "KubernetesNamespaceLifecycle"
+ image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
+ command: ["/usr/bin/apiserver"]
+ imagePullPolicy: Always
+ name: apiserver
+ ports:
+ - containerPort: 6443
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - mountPath: /var/run/kubernetes-service-catalog
+ name: apiserver-ssl
+ readOnly: true
+ - mountPath: /etc/origin/master
+ name: etcd-host-cert
+ readOnly: true
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: apiserver-ssl
+ secret:
+ defaultMode: 420
+ secretName: apiserver-ssl
+ items:
+ - key: tls.crt
+ path: apiserver.crt
+ - key: tls.key
+ path: apiserver.key
+ - hostPath:
+ path: /etc/origin/master
+ name: etcd-host-cert
+ - emptyDir: {}
+ name: data-dir
diff --git a/roles/openshift_service_catalog/templates/api_server_route.j2 b/roles/openshift_service_catalog/templates/api_server_route.j2
new file mode 100644
index 000000000..3c3da254d
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server_route.j2
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Route
+metadata:
+ name: apiserver
+spec:
+ port:
+ targetPort: secure
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: apiserver
+ weight: 100
+ wildcardPolicy: None
diff --git a/roles/openshift_service_catalog/templates/api_server_service.j2 b/roles/openshift_service_catalog/templates/api_server_service.j2
new file mode 100644
index 000000000..bae337201
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server_service.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: apiserver
+spec:
+ ports:
+ - name: secure
+ port: 443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: apiserver
+ sessionAffinity: None
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
new file mode 100644
index 000000000..33932eeb7
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -0,0 +1,46 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ labels:
+ app: controller-manager
+ name: controller-manager
+spec:
+ selector:
+ matchLabels:
+ app: controller-manager
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: controller-manager
+ spec:
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+ containers:
+ - env:
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ args:
+ - -v
+ - "5"
+ - "--leader-election-namespace=$(K8S_NAMESPACE)"
+ image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
+ command: ["/usr/bin/controller-manager"]
+ imagePullPolicy: Always
+ name: controller-manager
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
diff --git a/roles/openshift_service_catalog/templates/controller_manager_service.j2 b/roles/openshift_service_catalog/templates/controller_manager_service.j2
new file mode 100644
index 000000000..2bac645fc
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/controller_manager_service.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: controller-manager
+spec:
+ ports:
+ - port: 6443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: controller-manager
+ sessionAffinity: None
+ type: ClusterIP
diff --git a/roles/openshift_service_catalog/vars/default_images.yml b/roles/openshift_service_catalog/vars/default_images.yml
new file mode 100644
index 000000000..6fb9d1b86
--- /dev/null
+++ b/roles/openshift_service_catalog/vars/default_images.yml
@@ -0,0 +1,3 @@
+---
+__openshift_service_catalog_image_prefix: "docker.io/openshift/origin-"
+__openshift_service_catalog_image_version: "latest"
diff --git a/roles/openshift_service_catalog/vars/openshift-enterprise.yml b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..8c3f14485
--- /dev/null
+++ b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
@@ -0,0 +1,3 @@
+---
+__openshift_service_catalog_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_service_catalog_image_version: "3.6.0"
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 16792388f..f4cb8ddb2 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -84,115 +84,119 @@
- openshift_version is not defined
- openshift_protect_installed_version | bool
-- name: Set openshift_version for rpm installation
- include: set_version_rpm.yml
- when: not is_containerized | bool
-
-- name: Set openshift_version for containerized installation
- include: set_version_containerized.yml
- when: is_containerized | bool
-
-- block:
- - name: Get available {{ openshift.common.service_type}} version
- repoquery:
- name: "{{ openshift.common.service_type}}"
- ignore_excluders: true
- register: rpm_results
- - fail:
- msg: "Package {{ openshift.common.service_type}} not found"
- when: not rpm_results.results.package_found
- - set_fact:
- openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
- - name: Fail if rpm version and docker image version are different
- fail:
- msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
- # Both versions have the same string representation
+# The rest of these tasks should only execute on
+# masters and nodes as we can verify they have subscriptions
+- when:
+ - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
+ block:
+ - name: Set openshift_version for rpm installation
+ include: set_version_rpm.yml
+ when: not is_containerized | bool
+
+ - name: Set openshift_version for containerized installation
+ include: set_version_containerized.yml
+ when: is_containerized | bool
+
+ - block:
+ - name: Get available {{ openshift.common.service_type}} version
+ repoquery:
+ name: "{{ openshift.common.service_type}}"
+ ignore_excluders: true
+ register: rpm_results
+ - fail:
+ msg: "Package {{ openshift.common.service_type}} not found"
+ when: not rpm_results.results.package_found
+ - set_fact:
+ openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
+ - name: Fail if rpm version and docker image version are different
+ fail:
+ msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
+ # Both versions have the same string representation
+ when:
+ - openshift_rpm_version != openshift_version
+ # if openshift_pkg_version or openshift_image_tag is defined, the user permits the rpm and docker image versions to differ
+ - openshift_pkg_version is not defined
+ - openshift_image_tag is not defined
when:
- - openshift_rpm_version != openshift_version
- # if openshift_pkg_version or openshift_image_tag is defined, user gives a permission the rpm and docker image versions can differ
- - openshift_pkg_version is not defined
- - openshift_image_tag is not defined
- when:
- - is_containerized | bool
- - not is_atomic | bool
-
-# Warn if the user has provided an openshift_image_tag but is not doing a containerized install
-# NOTE: This will need to be modified/removed for future container + rpm installations work.
-- name: Warn if openshift_image_tag is defined when not doing a containerized install
- debug:
- msg: >
- openshift_image_tag is used for containerized installs. If you are trying to
- specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node.
- when:
- - not is_containerized | bool
- - openshift_image_tag is defined
-
+ - is_containerized | bool
+ - not is_atomic | bool
+
+ # Warn if the user has provided an openshift_image_tag but is not doing a containerized install
+ # NOTE: This will need to be modified/removed for future container + rpm installations to work.
+ - name: Warn if openshift_image_tag is defined when not doing a containerized install
+ debug:
+ msg: >
+ openshift_image_tag is used for containerized installs. If you are trying to
+ specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node.
+ when:
+ - not is_containerized | bool
+ - openshift_image_tag is defined
-# At this point we know openshift_version is set appropriately. Now we set
-# openshift_image_tag and openshift_pkg_version, so all roles can always assume
-# each of this variables *will* be set correctly and can use them per their
-# intended purpose.
+ # At this point we know openshift_version is set appropriately. Now we set
+ # openshift_image_tag and openshift_pkg_version, so all roles can always assume
+ # each of these variables *will* be set correctly and can use them per their
+ # intended purpose.
-- block:
- - debug:
- msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
+ - block:
+ - debug:
+ msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
- - set_fact:
- openshift_image_tag: v{{ openshift_version }}
+ - set_fact:
+ openshift_image_tag: v{{ openshift_version }}
- when: openshift_image_tag is not defined
+ when: openshift_image_tag is not defined
-- block:
- - debug:
- msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
+ - block:
+ - debug:
+ msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
- - set_fact:
- openshift_pkg_version: -{{ openshift_version }}
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
+ when: openshift_pkg_version is not defined
-- fail:
- msg: openshift_version role was unable to set openshift_version
- name: Abort if openshift_version was not set
- when: openshift_version is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_version
+ name: Abort if openshift_version was not set
+ when: openshift_version is not defined
-- fail:
- msg: openshift_version role was unable to set openshift_image_tag
- name: Abort if openshift_image_tag was not set
- when: openshift_image_tag is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_image_tag
+ name: Abort if openshift_image_tag was not set
+ when: openshift_image_tag is not defined
-- fail:
- msg: openshift_version role was unable to set openshift_pkg_version
- name: Abort if openshift_pkg_version was not set
- when: openshift_pkg_version is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_pkg_version
+ name: Abort if openshift_pkg_version was not set
+ when: openshift_pkg_version is not defined
-- fail:
- msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
- name: Abort if openshift_pkg_version was not set
- when:
- - not is_containerized | bool
- - openshift_version == '0.0'
+ - fail:
+ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+ name: Abort if openshift_pkg_version was not set
+ when:
+ - not is_containerized | bool
+ - openshift_version == '0.0'
-# We can't map an openshift_release to full rpm version like we can with containers; make sure
-# the rpm version we looked up matches the release requested and error out if not.
-- name: For an RPM install, abort when the release requested does not match the available version.
- when:
- - not is_containerized | bool
- - openshift_release is defined
- assert:
- that:
- - openshift_version.startswith(openshift_release) | bool
- msg: |-
- You requested openshift_release {{ openshift_release }}, which is not matched by
- the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
- on host {{ inventory_hostname }}.
- We will only install the latest RPMs, so please ensure you are getting the release
- you expect. You may need to adjust your Ansible inventory, modify the repositories
- available on the host, or run the appropriate OpenShift upgrade playbook.
+ # We can't map an openshift_release to full rpm version like we can with containers; make sure
+ # the rpm version we looked up matches the release requested and error out if not.
+ - name: For an RPM install, abort when the release requested does not match the available version.
+ when:
+ - not is_containerized | bool
+ - openshift_release is defined
+ assert:
+ that:
+ - openshift_version.startswith(openshift_release) | bool
+ msg: |-
+ You requested openshift_release {{ openshift_release }}, which is not matched by
+ the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ on host {{ inventory_hostname }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
-# The end result of these three variables is quite important so make sure they are displayed and logged:
-- debug: var=openshift_release
+ # The end result of these three variables is quite important so make sure they are displayed and logged:
+ - debug: var=openshift_release
-- debug: var=openshift_image_tag
+ - debug: var=openshift_image_tag
-- debug: var=openshift_pkg_version
+ - debug: var=openshift_pkg_version
diff --git a/test/integration/openshift_health_checker/setup_container.yml b/test/integration/openshift_health_checker/setup_container.yml
index 8793d954e..33e94cf1f 100644
--- a/test/integration/openshift_health_checker/setup_container.yml
+++ b/test/integration/openshift_health_checker/setup_container.yml
@@ -43,3 +43,6 @@
delegate_facts: True
delegate_to: "{{ container_name }}"
with_dict: "{{ l_host_vars | default({}) }}"
+
+- include: ../../../playbooks/byo/openshift-cluster/initialize_groups.yml
+- include: ../../../playbooks/common/openshift-cluster/evaluate_groups.yml