Diffstat (limited to 'roles')
-rw-r--r--  roles/calico_master/templates/calico-policy-controller.yml.j2 | 2
-rw-r--r--  roles/cockpit-ui/tasks/main.yml | 13
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 2
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 52
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 47
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 47
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 47
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 47
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 59
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 47
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 89
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 45
-rw-r--r--  roles/lib_openshift/src/ansible/oc_obj.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_ca_server_cert.py | 7
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_manage_node.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_policy_user.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_registry.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_router.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_clusterrole.py | 6
-rw-r--r--  roles/lib_openshift/src/class/oc_label.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_obj.py | 40
-rw-r--r--  roles/lib_openshift/src/doc/obj | 2
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 45
-rw-r--r--  roles/lib_openshift/src/lib/rule.py | 8
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_obj.yml | 207
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_registry.py | 10
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_router.py | 7
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py | 28
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 133
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
l---------  roles/openshift_examples/files/examples/latest | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json | 25
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml (renamed from roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml (renamed from roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml (renamed from roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/README.md (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/README.md) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json (renamed from roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json (renamed from roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json) | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json (renamed from roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json) | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json) | 25
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json) | 0
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json) | 0
-rw-r--r--  roles/openshift_excluder/tasks/disable.yml | 8
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 17
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 60
-rwxr-xr-x  roles/openshift_health_checker/library/aos_version.py | 214
-rw-r--r--  roles/openshift_health_checker/meta/main.yml | 1
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 65
-rw-r--r--  roles/openshift_health_checker/openshift_checks/memory_availability.py | 44
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 17
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 15
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 229
-rw-r--r--  roles/openshift_health_checker/test/aos_version_test.py | 120
-rw-r--r--  roles/openshift_health_checker/test/conftest.py | 10
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 155
-rw-r--r--  roles/openshift_health_checker/test/memory_availability_test.py | 91
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 14
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 35
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 5
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 6
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/glusterfs.yml | 51
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 34
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml | 4
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml | 345
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml | 168
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml | 342
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml | 168
-rw-r--r--  roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml | 345
-rw-r--r--  roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml | 168
-rw-r--r--  roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml | 342
-rw-r--r--  roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml | 168
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml (renamed from roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml) | 0
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml (renamed from roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml) | 0
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 11
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 23
-rw-r--r--  roles/openshift_logging/tasks/generate_configmaps.yaml | 40
-rw-r--r--  roles/openshift_logging/tasks/generate_secrets.yaml | 32
-rw-r--r--  roles/openshift_logging/tasks/generate_services.yaml | 32
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml | 178
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 4
-rw-r--r--  roles/openshift_logging/tasks/install_mux.yaml | 67
-rw-r--r--  roles/openshift_logging/tasks/oc_apply.yaml | 94
-rw-r--r--  roles/openshift_logging/tasks/procure_shared_key.yaml | 25
-rw-r--r--  roles/openshift_logging/tasks/set_es_storage.yaml | 82
-rw-r--r--  roles/openshift_logging/tasks/start_cluster.yaml | 20
-rw-r--r--  roles/openshift_logging/tasks/stop_cluster.yaml | 20
-rw-r--r--  roles/openshift_logging/templates/curator.j2 | 5
-rw-r--r--  roles/openshift_logging/templates/es.j2 | 7
-rw-r--r--  roles/openshift_logging/templates/fluentd.j2 | 12
-rw-r--r--  roles/openshift_logging/templates/mux.j2 | 121
-rw-r--r--  roles/openshift_logging/templates/service.j2 | 6
-rw-r--r--  roles/openshift_logging/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_manageiq/tasks/main.yaml | 88
-rw-r--r--  roles/openshift_manageiq/vars/main.yml | 64
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/master_docker/master.docker.service.j2 | 2
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py | 4
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py | 2
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py | 7
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py | 7
-rwxr-xr-x  roles/openshift_metrics/files/import_jks_certs.sh | 52
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 67
-rw-r--r--  roles/openshift_metrics/tasks/generate_heapster_certificates.yaml | 40
-rw-r--r--  roles/openshift_metrics/tasks/generate_heapster_secrets.yaml | 14
-rw-r--r--  roles/openshift_metrics/tasks/import_jks_certs.yaml | 37
-rw-r--r--  roles/openshift_metrics/tasks/install_heapster.yaml | 6
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 24
-rw-r--r--  roles/openshift_metrics/templates/heapster.j2 | 29
-rw-r--r--  roles/openshift_metrics/templates/service.j2 | 6
-rw-r--r--  roles/openshift_metrics/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_node/tasks/main.yml | 32
-rw-r--r--  roles/openshift_node_upgrade/meta/main.yml | 1
-rw-r--r--  roles/openshift_node_upgrade/tasks/docker/upgrade.yml | 17
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 72
-rw-r--r--  roles/openshift_node_upgrade/tasks/restart.yml (renamed from roles/openshift_node_upgrade/tasks/docker/restart.yml) | 3
-rw-r--r--  roles/openshift_provisioners/README.md | 29
-rw-r--r--  roles/openshift_provisioners/defaults/main.yaml | 12
-rw-r--r--  roles/openshift_provisioners/meta/main.yaml | 16
-rw-r--r--  roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml | 19
-rw-r--r--  roles/openshift_provisioners/tasks/generate_secrets.yaml | 14
-rw-r--r--  roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml | 12
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 70
-rw-r--r--  roles/openshift_provisioners/tasks/install_provisioners.yaml | 55
-rw-r--r--  roles/openshift_provisioners/tasks/install_support.yaml | 24
-rw-r--r--  roles/openshift_provisioners/tasks/main.yaml | 27
-rw-r--r--  roles/openshift_provisioners/tasks/oc_apply.yaml | 51
-rw-r--r--  roles/openshift_provisioners/tasks/start_cluster.yaml | 20
-rw-r--r--  roles/openshift_provisioners/tasks/stop_cluster.yaml | 20
-rw-r--r--  roles/openshift_provisioners/tasks/uninstall_provisioners.yaml | 43
-rw-r--r--  roles/openshift_provisioners/templates/clusterrolebinding.j2 | 30
-rw-r--r--  roles/openshift_provisioners/templates/efs.j2 | 58
-rw-r--r--  roles/openshift_provisioners/templates/pv.j2 | 32
-rw-r--r--  roles/openshift_provisioners/templates/pvc.j2 | 26
-rw-r--r--  roles/openshift_provisioners/templates/secret.j2 | 15
-rw-r--r--  roles/openshift_provisioners/templates/serviceaccount.j2 | 16
-rw-r--r--  roles/openshift_repos/README.md | 8
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 34
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 60
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 17
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml | 115
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml | 10
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml | 128
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml | 113
-rw-r--r--  roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py | 23
-rw-r--r--  roles/openshift_storage_glusterfs/meta/main.yml | 15
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 107
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 48
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml | 41
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 109
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml | 182
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 | 11
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 | 39
-rw-r--r--  roles/openshift_version/tasks/main.yml | 56
287 files changed, 4909 insertions, 3278 deletions
diff --git a/roles/calico_master/templates/calico-policy-controller.yml.j2 b/roles/calico_master/templates/calico-policy-controller.yml.j2
index 66c334ceb..3fb1abf0d 100644
--- a/roles/calico_master/templates/calico-policy-controller.yml.j2
+++ b/roles/calico_master/templates/calico-policy-controller.yml.j2
@@ -74,7 +74,7 @@ spec:
serviceAccountName: calico
containers:
- name: calico-policy-controller
- image: quay.io/calico/kube-policy-controller:v0.5.3
+ image: quay.io/calico/kube-policy-controller:v0.5.4
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 8bd68787a..0114498f8 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -1,13 +1,16 @@
---
- block:
- - name: Create passthrough route for docker-registry
+
+ # When openshift_hosted_manage_registry=true the openshift_hosted
+ # role will create the appropriate route for the docker-registry.
+ # When openshift_hosted_manage_registry=false then this code will
+ # not be run.
+ - name: fetch the docker-registry route
oc_route:
kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
name: docker-registry
namespace: default
- service_name: docker-registry
- state: present
- tls_termination: passthrough
+ state: list
register: docker_registry_route
- name: Create passthrough route for registry-console
@@ -41,7 +44,7 @@
{% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
{% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
-p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
- -p REGISTRY_HOST="{{ docker_registry_route.results.results[0].spec.host }}"
+ -p REGISTRY_HOST="{{ docker_registry_route.results[0].spec.host }}"
-p COCKPIT_KUBE_URL="https://{{ registry_console_cockpit_kube.results.results[0].spec.host }}"
--config={{ openshift_hosted_kubeconfig }}
-n default
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 9151dd0bd..1b5598f46 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -62,7 +62,7 @@ ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
{% endif -%}
#[logging]
-ETCD_DEBUG="{{ etcd_debug | default(false) | string }}"
+ETCD_DEBUG="{{ etcd_debug | default(false) | bool | string }}"
{% if etcd_log_package_levels is defined %}
ETCD_LOG_PACKAGE_LEVELS="{{ etcd_log_package_levels }}"
{% endif %}
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index c85bbc205..8a311cd0f 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -900,6 +900,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -919,11 +926,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -941,7 +952,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -958,13 +969,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -984,9 +995,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1001,10 +1012,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1017,16 +1028,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1504,7 +1515,10 @@ class CAServerCert(OpenShiftCLI):
x509output, _ = proc.communicate()
if proc.returncode == 0:
regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
- match = regex.search(x509output) # E501
+ match = regex.search(x509output.decode()) # E501
+ if not match:
+ return False
+
for entry in re.split(r", *", match.group(1)):
if entry.startswith('DNS') or entry.startswith('IP Address'):
cert_names.append(entry.split(':')[1])
@@ -1551,7 +1565,7 @@ class CAServerCert(OpenShiftCLI):
api_rval = server_cert.create()
if api_rval['returncode'] != 0:
- return {'Failed': True, 'msg': api_rval}
+ return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 5f1b94c3a..0930faadb 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -886,6 +886,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -905,11 +912,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -927,7 +938,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -944,13 +955,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -970,9 +981,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -987,10 +998,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1003,16 +1014,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1457,7 +1468,7 @@ class ManageNode(OpenShiftCLI):
if selector:
_sel = selector
- results = self._get('node', rname=_node, selector=_sel)
+ results = self._get('node', name=_node, selector=_sel)
if results['returncode'] != 0:
return results
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 423dbe44b..6a7be65d0 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -872,6 +872,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -891,11 +898,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -913,7 +924,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -930,13 +941,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -956,9 +967,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -973,10 +984,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -989,16 +1000,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index b72fce8bb..44923ecd2 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -872,6 +872,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -891,11 +898,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -913,7 +924,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -930,13 +941,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -956,9 +967,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -973,10 +984,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -989,16 +1000,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1956,7 +1967,7 @@ class PolicyUser(OpenShiftCLI):
@property
def policybindings(self):
if self._policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ results = self._get('policybindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve policybindings')
self._policy_bindings = results['results'][0]['items'][0]
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index 273f38fb7..0604f48bb 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -990,6 +990,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1009,11 +1016,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -1031,7 +1042,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1048,13 +1059,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1074,9 +1085,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1091,10 +1102,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1107,16 +1118,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -2301,7 +2312,7 @@ class Registry(OpenShiftCLI):
rval = 0
for part in self.registry_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
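The new `_replace` preamble deletes `metadata.resourceVersion` from the rendered file before running `oc replace`, so a stale version captured at render time cannot produce a conflict when the live object changed in the meantime. A minimal sketch of the same idea using PyYAML directly instead of the module's Yedit helper (the function name and file handling here are illustrative only):

    import yaml

    def strip_resource_version(fname):
        '''Drop metadata.resourceVersion so `oc replace -f <fname>` is not
        rejected with a resource-version conflict.'''
        with open(fname) as handle:
            data = yaml.safe_load(handle)

        removed = data.get('metadata', {}).pop('resourceVersion', None) is not None

        if removed:
            with open(fname, 'w') as handle:
                yaml.safe_dump(data, handle, default_flow_style=False)

        return removed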
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 16d4a8393..bdcf94a58 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1015,6 +1015,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1034,11 +1041,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -1056,7 +1067,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1073,13 +1084,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1099,9 +1110,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1116,10 +1127,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1132,16 +1143,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -2685,7 +2696,7 @@ class Router(OpenShiftCLI):
self.secret = None
self.rolebinding = None
for part in self.router_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index f05d47b63..af48ce636 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -864,6 +864,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -883,11 +890,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -905,7 +916,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -922,13 +933,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -948,9 +959,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -965,10 +976,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -981,16 +992,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1527,10 +1538,10 @@ class Rule(object):
results = []
for rule in inc_rules:
- results.append(Rule(rule['apiGroups'],
- rule['attributeRestrictions'],
- rule['resources'],
- rule['verbs']))
+ results.append(Rule(rule.get('apiGroups', ['']),
+ rule.get('attributeRestrictions', None),
+ rule.get('resources', []),
+ rule.get('verbs', [])))
return results
@@ -1629,7 +1640,7 @@ class OCClusterRole(OpenShiftCLI):
@property
def clusterrole(self):
''' property for clusterrole'''
- if not self._clusterrole:
+ if self._clusterrole is None:
self.get()
return self._clusterrole
@@ -1665,6 +1676,7 @@ class OCClusterRole(OpenShiftCLI):
elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
result['returncode'] = 0
+ self.clusterrole = None
return result
@@ -1734,6 +1746,9 @@ class OCClusterRole(OpenShiftCLI):
# Create it here
api_rval = oc_clusterrole.create()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
# return the created object
api_rval = oc_clusterrole.get()
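Two behavioural details in this file are easy to miss: `Rule` construction now tolerates rules that omit `apiGroups`, `attributeRestrictions`, `resources`, or `verbs` by falling back to defaults, and a failed `create()` now short-circuits instead of being masked by the follow-up `get()`. A small illustration of the `.get()` defaults, with a made-up rule dictionary:

    # A clusterrole rule as parsed from JSON; real rules may omit any key.
    rule = {'resources': ['pods'], 'verbs': ['get', 'list']}

    api_groups   = rule.get('apiGroups', [''])              # [''] is the core API group
    restrictions = rule.get('attributeRestrictions', None)  # optional, may be absent
    resources    = rule.get('resources', [])
    verbs        = rule.get('verbs', [])

    # rule['apiGroups'] would raise KeyError here; rule.get() keeps older
    # clusterrole payloads loadable.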
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 9f4748e0a..385ed888b 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -870,6 +870,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -889,11 +896,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -911,7 +922,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -928,13 +939,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -954,9 +965,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -971,10 +982,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -987,16 +998,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index df3c92845..649de547e 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -914,6 +914,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -933,11 +940,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -955,7 +966,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -972,13 +983,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -998,9 +1009,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1015,10 +1026,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1031,16 +1042,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index f96318f83..74bf63353 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -881,6 +881,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -900,11 +907,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -922,7 +933,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -939,13 +950,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -965,9 +976,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -982,10 +993,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -998,16 +1009,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 962af40c1..2dd3d28ec 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -854,6 +854,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -873,11 +880,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -895,7 +906,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -912,13 +923,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -938,9 +949,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -955,10 +966,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -971,16 +982,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index 047f49e6d..bb7f97689 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -873,6 +873,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -892,11 +899,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -914,7 +925,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -931,13 +942,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -957,9 +968,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -974,10 +985,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -990,16 +1001,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 700fe6d20..ec9abcda7 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -890,6 +890,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -909,11 +916,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -931,7 +942,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -948,13 +959,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -974,9 +985,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -991,10 +1002,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1007,16 +1018,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1551,7 +1562,7 @@ class OCLabel(OpenShiftCLI):
label_list = []
if self.name:
- result = self._get(resource=self.kind, rname=self.name)
+ result = self._get(resource=self.kind, name=self.name, selector=self.selector)
if result['results'][0] and 'labels' in result['results'][0]['metadata']:
label_list.append(result['results'][0]['metadata']['labels'])
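`OCLabel` now forwards its selector to `_get`, so the same code path can collect labels for one named object or for every object matching a label query; in `_get` (and `_delete`) the selector deliberately takes precedence over the name. A short sketch of the result shape the label collection above consumes, with illustrative metadata:

    # Shape of one parsed `oc get <kind> <name> -o json` result.
    result = {'results': [{'metadata': {'name': 'router-1',
                                        'labels': {'router': 'router'}}}]}

    label_list = []
    if result['results'][0] and 'labels' in result['results'][0]['metadata']:
        label_list.append(result['results'][0]['metadata']['labels'])
    # label_list == [{'router': 'router'}]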
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 9a1eefa3b..3abd50a2e 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -98,7 +98,7 @@ options:
aliases: []
kind:
description:
- - The kind attribute of the object. e.g. dc, bc, svc, route
+ - The kind attribute of the object. e.g. dc, bc, svc, route. May be a comma-separated list, e.g. "dc,po,svc".
required: True
default: None
aliases: []
@@ -893,6 +893,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -912,11 +919,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -934,7 +945,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -951,13 +962,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -977,9 +988,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -994,10 +1005,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1010,16 +1021,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1430,7 +1441,7 @@ class OCObject(OpenShiftCLI):
def __init__(self,
kind,
namespace,
- rname=None,
+ name=None,
selector=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
@@ -1439,21 +1450,21 @@ class OCObject(OpenShiftCLI):
super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose,
all_namespaces=all_namespaces)
self.kind = kind
- self.name = rname
+ self.name = name
self.selector = selector
def get(self):
'''return a kind by name '''
- results = self._get(self.kind, rname=self.name, selector=self.selector)
- if results['returncode'] != 0 and 'stderr' in results and \
- '\"%s\" not found' % self.name in results['stderr']:
+ results = self._get(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
results['returncode'] = 0
return results
def delete(self):
- '''return all pods '''
- return self._delete(self.kind, self.name)
+ '''delete the object'''
+ return self._delete(self.kind, name=self.name, selector=self.selector)
def create(self, files=None, content=None):
'''
@@ -1529,24 +1540,30 @@ class OCObject(OpenShiftCLI):
# Get
#####
if state == 'list':
- return {'changed': False, 'results': api_rval, 'state': 'list'}
-
- if not params['name']:
- return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+ return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
- if not Utils.exists(api_rval['results'], params['name']):
- return {'changed': False, 'state': 'absent'}
+ # verify it is not in our results
+ if (params['name'] is not None or params['selector'] is not None) and \
+ (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
+ return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
api_rval = ocobj.delete()
- return {'changed': True, 'results': api_rval, 'state': 'absent'}
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ # create/update: Must define a name beyond this point
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is present.'}
if state == 'present':
########
@@ -1572,7 +1589,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
@@ -1587,7 +1604,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+ return {'changed': False, 'results': api_rval['results'][0], 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
@@ -1606,7 +1623,7 @@ class OCObject(OpenShiftCLI):
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
# -*- -*- -*- End included fragment: class/oc_obj.py -*- -*- -*-
@@ -1634,7 +1651,7 @@ def main():
force=dict(default=False, type='bool'),
selector=dict(default=None, type='str'),
),
- mutually_exclusive=[["content", "files"]],
+ mutually_exclusive=[["content", "files"], ["selector", "name"]],
supports_check_mode=True,
)
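For `oc_obj` the net effect is that `state=absent` can now delete by label selector as well as by name, the module arguments make `name` and `selector` mutually exclusive, and a name is only demanded once the module moves on to create or update. The reworked `_delete` enforces the same contract at the CLI layer; a mirror of that argument handling, with made-up resource names:

    def build_delete_cmd(resource, name=None, selector=None):
        '''Selector wins over name; supplying neither is an error.'''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise ValueError('Either name or selector is required when calling delete.')
        return cmd

    build_delete_cmd('pod', selector='app=router')   # ['delete', 'pod', '--selector=app=router']
    build_delete_cmd('dc', name='docker-registry')   # ['delete', 'dc', 'docker-registry']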
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 297ae6cda..bc5245216 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -825,6 +825,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -844,11 +851,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -866,7 +877,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -883,13 +894,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -909,9 +920,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -926,10 +937,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -942,16 +953,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 7d5b6a751..de5426c51 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -882,6 +882,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -901,11 +908,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -923,7 +934,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -940,13 +951,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -966,9 +977,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -983,10 +994,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -999,16 +1010,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 005195dff..02cd810ce 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -879,6 +879,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -898,11 +905,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -920,7 +931,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -937,13 +948,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -963,9 +974,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -980,10 +991,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -996,16 +1007,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 2cab01d50..a9103ebf6 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -874,6 +874,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -893,11 +900,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -915,7 +926,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -932,13 +943,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -958,9 +969,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -975,10 +986,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -991,16 +1002,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index e1a96ee94..f005adffc 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -924,6 +924,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -943,11 +950,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -965,7 +976,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -982,13 +993,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1008,9 +1019,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1025,10 +1036,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1041,16 +1052,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 9ebabc9cc..9dcb38216 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -868,6 +868,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -887,11 +894,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -909,7 +920,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -926,13 +937,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -952,9 +963,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -969,10 +980,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -985,16 +996,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index c61139bb9..2ac0abcec 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -914,6 +914,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -933,11 +940,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -955,7 +966,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -972,13 +983,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -998,9 +1009,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1015,10 +1026,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1031,16 +1042,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 855eaade1..0af695e08 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -920,6 +920,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -939,11 +946,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -961,7 +972,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -978,13 +989,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1004,9 +1015,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1021,10 +1032,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1037,16 +1048,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 6b2bb8469..ba8a1fdac 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -866,6 +866,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -885,11 +892,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -907,7 +918,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -924,13 +935,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -950,9 +961,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -967,10 +978,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -983,16 +994,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 881331456..5bff7621c 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -866,6 +866,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -885,11 +892,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -907,7 +918,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -924,13 +935,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -950,9 +961,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -967,10 +978,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -983,16 +994,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index a14248e45..450a30f57 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -926,6 +926,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -945,11 +952,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -967,7 +978,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -984,13 +995,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1010,9 +1021,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1027,10 +1038,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1043,16 +1054,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 947591991..0937df5a1 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -838,6 +838,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -857,11 +864,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -879,7 +890,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -896,13 +907,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -922,9 +933,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -939,10 +950,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -955,16 +966,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 607d2e57a..d0e7e77e1 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -903,6 +903,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -922,11 +929,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -944,7 +955,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -961,13 +972,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -987,9 +998,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1004,10 +1015,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1020,16 +1031,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/src/ansible/oc_obj.py b/roles/lib_openshift/src/ansible/oc_obj.py
index 701740e4f..6ab53d044 100644
--- a/roles/lib_openshift/src/ansible/oc_obj.py
+++ b/roles/lib_openshift/src/ansible/oc_obj.py
@@ -23,7 +23,7 @@ def main():
force=dict(default=False, type='bool'),
selector=dict(default=None, type='str'),
),
- mutually_exclusive=[["content", "files"]],
+ mutually_exclusive=[["content", "files"], ["selector", "name"]],
supports_check_mode=True,
)
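Making `selector` and `name` mutually exclusive at the module boundary means a caller that passes both fails during argument parsing rather than deep inside the delete path. A minimal sketch of that behaviour with the standard `AnsibleModule` helper; only the two option names come from the hunk above, the rest is illustrative:

```python
from ansible.module_utils.basic import AnsibleModule


def main():
    # Illustrative argument spec: just the two options relevant to the change.
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(default=None, type='str'),
            selector=dict(default=None, type='str'),
        ),
        # Supplying both name and selector is rejected at parameter validation.
        mutually_exclusive=[['selector', 'name']],
        supports_check_mode=True,
    )
    module.exit_json(changed=False,
                     name=module.params['name'],
                     selector=module.params['selector'])


if __name__ == '__main__':
    main()
```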
diff --git a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
index fa0c4e3af..cf99a6584 100644
--- a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
@@ -77,7 +77,10 @@ class CAServerCert(OpenShiftCLI):
x509output, _ = proc.communicate()
if proc.returncode == 0:
regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
- match = regex.search(x509output) # E501
+ match = regex.search(x509output.decode()) # E501
+ if not match:
+ return False
+
for entry in re.split(r", *", match.group(1)):
if entry.startswith('DNS') or entry.startswith('IP Address'):
cert_names.append(entry.split(':')[1])
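Two fixes land in this hunk: the `openssl` output is decoded before the regex runs (on Python 3 `Popen.communicate()` returns bytes), and a certificate with no Subject Alternative Name section no longer raises on `match.group(1)`. A standalone sketch of the same pattern, separate from the repo's class and for illustration only:

```python
import re
import subprocess

SAN_RE = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n",
                    re.MULTILINE)


def cert_hostnames(cert_path):
    '''Return the DNS/IP entries from a certificate's SAN block, or None.'''
    proc = subprocess.Popen(['openssl', 'x509', '-text', '-in', cert_path],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    if proc.returncode != 0:
        return None
    # Bytes on Python 3, so decode before matching; a missing SAN section
    # simply yields no names instead of an AttributeError.
    match = SAN_RE.search(out.decode())
    if not match:
        return None
    names = []
    for entry in re.split(r", *", match.group(1)):
        if entry.startswith('DNS') or entry.startswith('IP Address'):
            names.append(entry.split(':')[1])
    return names
```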
@@ -124,7 +127,7 @@ class CAServerCert(OpenShiftCLI):
api_rval = server_cert.create()
if api_rval['returncode'] != 0:
- return {'Failed': True, 'msg': api_rval}
+ return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
diff --git a/roles/lib_openshift/src/class/oc_adm_manage_node.py b/roles/lib_openshift/src/class/oc_adm_manage_node.py
index c07320477..6d9f24baa 100644
--- a/roles/lib_openshift/src/class/oc_adm_manage_node.py
+++ b/roles/lib_openshift/src/class/oc_adm_manage_node.py
@@ -44,7 +44,7 @@ class ManageNode(OpenShiftCLI):
if selector:
_sel = selector
- results = self._get('node', rname=_node, selector=_sel)
+ results = self._get('node', name=_node, selector=_sel)
if results['returncode'] != 0:
return results
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py
index 88fcc1ddc..37a685ebb 100644
--- a/roles/lib_openshift/src/class/oc_adm_policy_user.py
+++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py
@@ -46,7 +46,7 @@ class PolicyUser(OpenShiftCLI):
@property
def policybindings(self):
if self._policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ results = self._get('policybindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve policybindings')
self._policy_bindings = results['results'][0]['items'][0]
diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py
index 25519c9c9..720b44cdc 100644
--- a/roles/lib_openshift/src/class/oc_adm_registry.py
+++ b/roles/lib_openshift/src/class/oc_adm_registry.py
@@ -105,7 +105,7 @@ class Registry(OpenShiftCLI):
rval = 0
for part in self.registry_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
diff --git a/roles/lib_openshift/src/class/oc_adm_router.py b/roles/lib_openshift/src/class/oc_adm_router.py
index 356d06fdf..1a0b94b80 100644
--- a/roles/lib_openshift/src/class/oc_adm_router.py
+++ b/roles/lib_openshift/src/class/oc_adm_router.py
@@ -136,7 +136,7 @@ class Router(OpenShiftCLI):
self.secret = None
self.rolebinding = None
for part in self.router_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py
index 1d3d977db..ae6795446 100644
--- a/roles/lib_openshift/src/class/oc_clusterrole.py
+++ b/roles/lib_openshift/src/class/oc_clusterrole.py
@@ -22,7 +22,7 @@ class OCClusterRole(OpenShiftCLI):
@property
def clusterrole(self):
''' property for clusterrole'''
- if not self._clusterrole:
+ if self._clusterrole is None:
self.get()
return self._clusterrole
@@ -58,6 +58,7 @@ class OCClusterRole(OpenShiftCLI):
elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
result['returncode'] = 0
+ self.clusterrole = None
return result
@@ -127,6 +128,9 @@ class OCClusterRole(OpenShiftCLI):
# Create it here
api_rval = oc_clusterrole.create()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
# return the created object
api_rval = oc_clusterrole.get()
diff --git a/roles/lib_openshift/src/class/oc_label.py b/roles/lib_openshift/src/class/oc_label.py
index ed17eecb1..0a6895177 100644
--- a/roles/lib_openshift/src/class/oc_label.py
+++ b/roles/lib_openshift/src/class/oc_label.py
@@ -134,7 +134,7 @@ class OCLabel(OpenShiftCLI):
label_list = []
if self.name:
- result = self._get(resource=self.kind, rname=self.name)
+ result = self._get(resource=self.kind, name=self.name, selector=self.selector)
if result['results'][0] and 'labels' in result['results'][0]['metadata']:
label_list.append(result['results'][0]['metadata']['labels'])
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 51d3ce996..89ee2f5a0 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -10,7 +10,7 @@ class OCObject(OpenShiftCLI):
def __init__(self,
kind,
namespace,
- rname=None,
+ name=None,
selector=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
@@ -19,21 +19,21 @@ class OCObject(OpenShiftCLI):
super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose,
all_namespaces=all_namespaces)
self.kind = kind
- self.name = rname
+ self.name = name
self.selector = selector
def get(self):
'''return a kind by name '''
- results = self._get(self.kind, rname=self.name, selector=self.selector)
- if results['returncode'] != 0 and 'stderr' in results and \
- '\"%s\" not found' % self.name in results['stderr']:
+ results = self._get(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
results['returncode'] = 0
return results
def delete(self):
- '''return all pods '''
- return self._delete(self.kind, self.name)
+ '''delete the object'''
+ return self._delete(self.kind, name=self.name, selector=self.selector)
def create(self, files=None, content=None):
'''
@@ -109,24 +109,30 @@ class OCObject(OpenShiftCLI):
# Get
#####
if state == 'list':
- return {'changed': False, 'results': api_rval, 'state': 'list'}
-
- if not params['name']:
- return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+ return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
- if not Utils.exists(api_rval['results'], params['name']):
- return {'changed': False, 'state': 'absent'}
+            # verify it's not in our results
+            if (params['name'] is not None or params['selector'] is not None) and \
+               (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
+ return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
api_rval = ocobj.delete()
- return {'changed': True, 'results': api_rval, 'state': 'absent'}
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ # create/update: Must define a name beyond this point
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is present.'}
if state == 'present':
########
@@ -152,7 +158,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
@@ -167,7 +173,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+ return {'changed': False, 'results': api_rval['results'][0], 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
@@ -186,4 +192,4 @@ class OCObject(OpenShiftCLI):
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
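With delete-by-selector in place, the absent branch first checks whether the preceding `oc get ... -o json` matched anything at all, and reports no change when it did not. A hypothetical helper mirroring that check:

```python
def already_absent(api_rval):
    '''Hypothetical helper mirroring the absent-state check above.

    `oc get -o json` wraps matches in a List object, so "nothing matched"
    shows up either as no results at all or as an empty 'items' array.
    '''
    results = api_rval.get('results', [])
    return len(results) == 0 or len(results[0].get('items', [])) == 0


# A selector that matched nothing means there is nothing to delete.
assert already_absent({'results': [{'kind': 'List', 'items': []}]})
assert not already_absent({'results': [{'kind': 'List',
                                        'items': [{'kind': 'DeploymentConfig'}]}]})
```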
diff --git a/roles/lib_openshift/src/doc/obj b/roles/lib_openshift/src/doc/obj
index e44843eb3..4ff912b2d 100644
--- a/roles/lib_openshift/src/doc/obj
+++ b/roles/lib_openshift/src/doc/obj
@@ -47,7 +47,7 @@ options:
aliases: []
kind:
description:
- - The kind attribute of the object. e.g. dc, bc, svc, route
+ - The kind attribute of the object. e.g. dc, bc, svc, route. May be a comma-separated list, e.g. "dc,po,svc".
required: True
default: None
aliases: []
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 132c586c9..fc1b6f1ec 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -76,6 +76,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
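The replace path now strips `metadata.resourceVersion` from the file before running `oc replace`: a stale value copied from an earlier `oc get` would otherwise make the API server reject the replace with a conflict. The hunk uses the repo's `Yedit` helper; the sketch below shows the equivalent idea with plain PyYAML, for illustration only:

```python
import yaml  # PyYAML stand-in for the repo's Yedit helper


def strip_resource_version(fname):
    '''Drop metadata.resourceVersion from a resource file, rewriting the
    file only when the field was actually present.'''
    with open(fname) as fd:
        obj = yaml.safe_load(fd) or {}
    if obj.get('metadata', {}).pop('resourceVersion', None) is not None:
        with open(fname, 'w') as fd:
            yaml.safe_dump(obj, fd, default_flow_style=False)
```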
@@ -95,11 +102,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
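`_delete` now accepts either a name or a label selector, lets the selector take precedence, and raises an explicit error when neither is supplied. A self-contained sketch of the new argument handling, with the resulting command lines as doctests:

```python
def build_delete_cmd(resource, name=None, selector=None):
    '''Standalone sketch of the argument handling introduced above.

    >>> build_delete_cmd('dc', name='mysql')
    ['delete', 'dc', 'mysql']
    >>> build_delete_cmd('dc', selector='name=mysql')
    ['delete', 'dc', '--selector=name=mysql']
    '''
    cmd = ['delete', resource]
    if selector is not None:
        cmd.append('--selector={}'.format(selector))
    elif name is not None:
        cmd.append(name)
    else:
        raise ValueError('Either name or selector is required when calling delete.')
    return cmd


if __name__ == '__main__':
    import doctest
    doctest.testmod()
```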
@@ -117,7 +128,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -134,13 +145,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -160,9 +171,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -177,10 +188,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -193,16 +204,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
diff --git a/roles/lib_openshift/src/lib/rule.py b/roles/lib_openshift/src/lib/rule.py
index 4590dcf90..fe5ed9723 100644
--- a/roles/lib_openshift/src/lib/rule.py
+++ b/roles/lib_openshift/src/lib/rule.py
@@ -136,9 +136,9 @@ class Rule(object):
results = []
for rule in inc_rules:
- results.append(Rule(rule['apiGroups'],
- rule['attributeRestrictions'],
- rule['resources'],
- rule['verbs']))
+ results.append(Rule(rule.get('apiGroups', ['']),
+ rule.get('attributeRestrictions', None),
+ rule.get('resources', []),
+ rule.get('verbs', [])))
return results
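Switching to `dict.get()` with defaults lets rule parsing tolerate cluster roles that omit optional fields such as `attributeRestrictions`, instead of raising `KeyError`. A trimmed-down stand-in for illustration (the real `Rule` class lives in this file):

```python
class Rule(object):
    '''Minimal stand-in for the repo's Rule class, illustration only.'''
    def __init__(self, api_groups, attribute_restrictions, resources, verbs):
        self.api_groups = api_groups
        self.attribute_restrictions = attribute_restrictions
        self.resources = resources
        self.verbs = verbs


def parse_rules(inc_rules):
    '''Build Rule objects, tolerating rules that omit optional keys.'''
    return [Rule(rule.get('apiGroups', ['']),
                 rule.get('attributeRestrictions', None),
                 rule.get('resources', []),
                 rule.get('verbs', []))
            for rule in inc_rules]


# A rule exported without apiGroups or attributeRestrictions parses cleanly.
rules = parse_rules([{'resources': ['pods'], 'verbs': ['get', 'list']}])
assert rules[0].api_groups == ['']
```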
diff --git a/roles/lib_openshift/src/test/integration/oc_obj.yml b/roles/lib_openshift/src/test/integration/oc_obj.yml
new file mode 100755
index 000000000..c22a2f6a9
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_obj.yml
@@ -0,0 +1,207 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_obj.yml -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: create test project
+ oc_project:
+ name: test
+ description: all things test
+ node_selector: ""
+
+ # Create Check #
+ - name: create a dc
+ oc_obj:
+ state: present
+ name: mysql
+ namespace: test
+ kind: dc
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ labels:
+ name: mysql
+ name: mysql
+ spec:
+ replicas: 1
+ selector: {}
+ strategy:
+ resources: {}
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: mysql
+ spec:
+ containers:
+ - env:
+ - name: MYSQL_USER
+ value: mysql
+ - name: MYSQL_PASSWORD
+ value: mysql
+ - name: MYSQL_DATABASE
+ value: mysql
+ - name: MYSQL_ROOT_PASSWORD
+ value: mysql
+ image: openshift/mysql-55-centos7:latest
+ imagePullPolicy: Always
+ name: mysql
+ ports:
+ - containerPort: 3306
+ name: tcp-3306
+ protocol: TCP
+ resources: {}
+ securityContext:
+ capabilities: {}
+ privileged: false
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 31
+ triggers:
+ - type: ConfigChange
+ - imageChangeParams:
+ automatic: true
+ containerNames:
+ - mysql
+ from:
+ kind: ImageStreamTag
+ name: mysql:latest
+ type: ImageChange
+
+ - name: fetch created dc
+ oc_obj:
+ name: mysql
+ kind: dc
+ state: list
+ namespace: test
+ register: dcout
+
+ - debug: var=dcout
+
+ - assert:
+ that:
+ - dcout.results.returncode == 0
+ - dcout.results.results[0].metadata.name == 'mysql'
+ # End Create Check #
+
+
+ # Delete Check #
+ - name: delete created dc
+ oc_obj:
+ name: mysql
+ kind: dc
+ state: absent
+ namespace: test
+ register: dcout
+
+  - name: fetch deleted dc
+ oc_obj:
+ name: mysql
+ kind: dc
+ state: list
+ namespace: test
+ register: dcout
+
+ - debug: var=dcout
+
+ - assert:
+ that:
+ - dcout.results.returncode == 0
+ - "'\"mysql\" not found' in dcout.results.stderr"
+ # End Delete Check #
+
+ # Delete selector Check #
+ - name: create a dc
+ oc_obj:
+ state: present
+ name: mysql
+ namespace: test
+ kind: dc
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ labels:
+ name: mysql
+ name: mysql
+ spec:
+ replicas: 1
+ selector: {}
+ strategy:
+ resources: {}
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: mysql
+ spec:
+ containers:
+ - env:
+ - name: MYSQL_USER
+ value: mysql
+ - name: MYSQL_PASSWORD
+ value: mysql
+ - name: MYSQL_DATABASE
+ value: mysql
+ - name: MYSQL_ROOT_PASSWORD
+ value: mysql
+ image: openshift/mysql-55-centos7:latest
+ imagePullPolicy: Always
+ name: mysql
+ ports:
+ - containerPort: 3306
+ name: tcp-3306
+ protocol: TCP
+ resources: {}
+ securityContext:
+ capabilities: {}
+ privileged: false
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 31
+ triggers:
+ - type: ConfigChange
+ - imageChangeParams:
+ automatic: true
+ containerNames:
+ - mysql
+ from:
+ kind: ImageStreamTag
+ name: mysql:latest
+ type: ImageChange
+
+ - name: delete using selector
+ oc_obj:
+ namespace: test
+ selector: name=mysql
+ kind: dc
+ state: absent
+ register: dcout
+
+ - debug: var=dcout
+
+ - name: get the dc
+ oc_obj:
+ namespace: test
+ selector: name=mysql
+ kind: dc
+ state: list
+ register: dcout
+
+ - debug: var=dcout
+
+ - assert:
+ that:
+ - dcout.results.returncode == 0
+ - dcout.results.results[0]["items"]|length == 0
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
index bab36fddc..30e13ce4b 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
@@ -205,10 +205,11 @@ class RegistryTest(unittest.TestCase):
}
]}'''
+ @mock.patch('oc_adm_registry.locate_oc_binary')
@mock.patch('oc_adm_registry.Utils._write')
@mock.patch('oc_adm_registry.Utils.create_tmpfile_copy')
@mock.patch('oc_adm_registry.Registry._run')
- def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):
''' Testing state present '''
params = {'state': 'present',
'debug': False,
@@ -240,10 +241,9 @@ class RegistryTest(unittest.TestCase):
(0, '', ''),
]
- mock_tmpfile_copy.side_effect = [
- '/tmp/mocked_kubeconfig',
- '/tmp/mocked_kubeconfig',
- ]
+ mock_tmpfile_copy.return_value = '/tmp/mocked_kubeconfig'
+
+ mock_oc_binary.return_value = 'oc'
results = Registry.run_ansible(params, False)
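The new `locate_oc_binary` patch is stacked above the existing decorators, which is why its mock arrives as the last test argument: stacked `mock.patch` decorators are applied bottom-up, so the decorator nearest the function supplies the first mock and the newly added outermost one supplies the last. A small self-contained illustration using stdlib targets rather than the module under test:

```python
import unittest
from unittest import mock


class DecoratorOrderTest(unittest.TestCase):
    @mock.patch('os.getcwd')        # outermost patch -> last mock argument
    @mock.patch('os.path.exists')   # innermost patch -> first mock argument
    def test_order(self, mock_exists, mock_getcwd):
        mock_exists.return_value = True
        mock_getcwd.return_value = '/tmp'
        self.assertTrue(mock_exists('anything'))
        self.assertEqual(mock_getcwd(), '/tmp')


if __name__ == '__main__':
    unittest.main()
```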
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
index 51393dbaf..5481ac623 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
@@ -286,10 +286,11 @@ class RouterTest(unittest.TestCase):
]
}'''
+ @mock.patch('oc_adm_router.locate_oc_binary')
@mock.patch('oc_adm_router.Utils._write')
@mock.patch('oc_adm_router.Utils.create_tmpfile_copy')
@mock.patch('oc_adm_router.Router._run')
- def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):
''' Testing a create '''
params = {'state': 'present',
'debug': False,
@@ -345,6 +346,10 @@ class RouterTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc',
+ ]
+
results = Router.run_ansible(params, False)
self.assertTrue(results['changed'])
diff --git a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
index da326742f..b19a5a880 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
@@ -25,9 +25,10 @@ class OCObjectValidatorTest(unittest.TestCase):
maxDiff = None
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_no_data(self, mock_cmd, mock_tmpfile_copy):
+ def test_no_data(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when both all objects are empty '''
# Arrange
@@ -62,6 +63,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc',
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
@@ -76,9 +81,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_error_code(self, mock_cmd, mock_tmpfile_copy):
+ def test_error_code(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when we fail to get objects '''
# Arrange
@@ -98,6 +104,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
error_results = {
'returncode': 1,
'stderr': 'Error.',
@@ -120,9 +130,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_valid_both(self, mock_cmd, mock_tmpfile_copy):
+ def test_valid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when both all objects are valid '''
# Arrange
@@ -427,6 +438,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
@@ -441,9 +456,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_invalid_both(self, mock_cmd, mock_tmpfile_copy):
+ def test_invalid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when all objects are invalid '''
# Arrange
@@ -886,6 +902,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index df43c3770..107e27f89 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -19,7 +19,6 @@ to be used with an inventory that is representative of the
cluster. For best results run `ansible-playbook` with the `-v` option.
-
# Role Variables
Core variables in this role:
@@ -51,8 +50,8 @@ How to use the Certificate Expiration Checking Role.
Run one of the example playbooks using an inventory file
representative of your existing cluster. Some example playbooks are
-included in this role, or you can read on below after this example to
-craft you own.
+included in this role, or you can [read on below for more examples](#more-example-playbooks)
+to help you craft your own.
```
$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode.yaml
@@ -69,11 +68,47 @@ Using the `easy-mode.yaml` playbook will produce:
> `/usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode.yaml`
> instead
+## Run from a container
+
+The example playbooks that use this role are packaged in the
+[container image for openshift-ansible](../../README_CONTAINER_IMAGE.md), so you
+can run any of them by setting the `PLAYBOOK_FILE` environment variable when
+running an openshift-ansible container.
+
+There are several [examples](../../examples/README.md) in the `examples` directory that run certificate check playbooks from a container running on OpenShift.
+
## More Example Playbooks
> **Note:** These Playbooks are available to run directly out of the
> [/playbooks/certificate_expiry/](../../playbooks/certificate_expiry/) directory.
+### Default behavior
+
+This playbook just invokes the certificate expiration check role with default options:
+
+
+```yaml
+---
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ roles:
+ - role: openshift_certificate_expiry
+```
+
+**From git:**
+```
+$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/default.yaml
+```
+**From openshift-ansible-playbooks rpm:**
+```
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/default.yaml
+```
+
+> [View This Playbook](../../playbooks/certificate_expiry/default.yaml)
+
+### Easy mode
This example playbook is great if you're just wanting to **try the
role out**. This playbook enables HTML and JSON reports. All
@@ -104,35 +139,70 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce
> [View This Playbook](../../playbooks/certificate_expiry/easy-mode.yaml)
-***
+### Easy mode and upload reports to masters
+
+This example builds on top of [easy-mode.yaml](#easy-mode) and additionally
+uploads a copy of the generated reports to the masters, with a timestamp in the
+file names.
+
+This is especially useful when the playbook runs from within a container, because
+the reports are generated inside the container and we need a way to access them.
+Uploading a copy of the reports to the masters is one way to make it easy to
+access them. Alternatively you can use the
+[role variables](#role-variables) that control the path of the generated reports
+to point to a container volume (see the [playbook with custom paths](#generate-html-and-json-reports-in-a-custom-path) for an example).
-Default behavior:
+With the container use case in mind, this playbook allows control over some
+options via environment variables:
+
+ - `CERT_EXPIRY_WARN_DAYS`: sets `openshift_certificate_expiry_warning_days`, overriding the role's default.
+ - `COPY_TO_PATH`: path on the masters where the generated reports are uploaded.
```yaml
---
-- name: Check cert expirys
+- name: Generate certificate expiration reports
hosts: nodes:masters:etcd
- become: yes
gather_facts: no
+ vars:
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}"
roles:
- role: openshift_certificate_expiry
+
+- name: Upload reports to master
+ hosts: masters
+ gather_facts: no
+ vars:
+ destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ tasks:
+ - name: Create directory in masters
+ file:
+ path: "{{ destination_path }}"
+ state: directory
+ - name: Copy the reports to the masters
+ copy:
+ dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}"
+ src: "/tmp/{{ item }}"
+ with_items:
+ - "cert-expiry-report.html"
+ - "cert-expiry-report.json"
```
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode-upload.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode-upload.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/default.yaml)
+> [View This Playbook](../../playbooks/certificate_expiry/easy-mode-upload.yaml)
-***
-
-
-Generate HTML and JSON artifacts in their default paths:
+### Generate HTML and JSON artifacts in their default paths
```yaml
---
@@ -158,7 +228,38 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce
> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_default_paths.yaml)
-***
+### Generate HTML and JSON reports in a custom path
+
+This example customizes the report generation path, pointing it at a specific directory (`/var/lib/certcheck`), and adds a date timestamp to the generated file names. This lets you keep multiple copies of the reports in the same location.
+
+```yaml
+---
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html"
+ openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json"
+ roles:
+ - role: openshift_certificate_expiry
+```
+
+**From git:**
+```
+$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/html_and_json_timestamp.yaml
+```
+**From openshift-ansible-playbooks rpm:**
+```
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/html_and_json_timestamp.yaml
+```
+
+> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_timestamp.yaml)
+
+### Long warning window
Change the expiration warning window to 1500 days (good for testing
the module out):
@@ -186,7 +287,7 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce
> [View This Playbook](../../playbooks/certificate_expiry/longer_warning_period.yaml)
-***
+### Long warning window and JSON report
Change the expiration warning window to 1500 days (good for testing
the module out) and save the results as a JSON file:
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 0f2bec6d3..c7e51bbfc 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -6,7 +6,7 @@
# This script should be run from openshift-ansible/roles/openshift_examples
XPAAS_VERSION=ose-v1.3.6
-ORIGIN_VERSION=${1:-v1.6}
+ORIGIN_VERSION=${1:-v3.6}
RHAMP_TAG=1.0.0.GA
RHAMP_TEMPLATE=https://raw.githubusercontent.com/3scale/rhamp-openshift-templates/${RHAMP_TAG}/apicast-gateway/apicast-gateway-template.yml
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest
index 536385712..08751d131 120000
--- a/roles/openshift_examples/files/examples/latest
+++ b/roles/openshift_examples/files/examples/latest
@@ -1 +1 @@
-v1.6
\ No newline at end of file
+v3.6
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json
index 1a90a9409..a81dbb654 100644
--- a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json
@@ -800,7 +800,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
index eb94c3bb4..2ed0efe1e 100644
--- a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
@@ -707,7 +707,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json
index fa31f7f61..a2b59c2d3 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json
@@ -19,6 +19,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "database-password": "${DATABASE_PASSWORD}",
+ "connect-string": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -209,7 +220,12 @@
"env": [
{
"name": "ConnectionString",
- "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "connect-string"
+ }
+ }
}
],
"resources": {
@@ -373,7 +389,12 @@
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "database-password"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml
index 14bdd1dca..14bdd1dca 100644
--- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml
index 709d8d976..709d8d976 100644
--- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
index 4f25a9c8f..4f25a9c8f 100644
--- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/README.md b/roles/openshift_examples/files/examples/v3.6/db-templates/README.md
index a36d7ba7d..a36d7ba7d 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/README.md
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/README.md
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
index f347f1f9f..f347f1f9f 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
index 6ed744777..6ed744777 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
index 97a8abf6d..97a8abf6d 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
index 0656219fb..0656219fb 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
index d60b4647d..d60b4647d 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
index c2bfa40fd..c2bfa40fd 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
index 7a16e742a..7a16e742a 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
index 242212d6f..242212d6f 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
index e9af50937..e9af50937 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
index aa27578a9..aa27578a9 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json
index 857ffa980..857ffa980 100644
--- a/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
index 1a90a9409..a81dbb654 100644
--- a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
@@ -800,7 +800,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
index eb94c3bb4..2ed0efe1e 100644
--- a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
@@ -707,7 +707,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md
index f48d8d4a8..f48d8d4a8 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
index 34f5fcbcc..34f5fcbcc 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
index eb3d296be..eb3d296be 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
index da2454d2e..da2454d2e 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
index 81ae63416..81ae63416 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
index 7a285dba8..7a285dba8 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
index 9f982c286..9f982c286 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
index 7bee85ddd..7bee85ddd 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json
index a09d71a00..a09d71a00 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json
index fa31f7f61..a2b59c2d3 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json
@@ -19,6 +19,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "database-password": "${DATABASE_PASSWORD}",
+ "connect-string": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -209,7 +220,12 @@
"env": [
{
"name": "ConnectionString",
- "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "connect-string"
+ }
+ }
}
],
"resources": {
@@ -373,7 +389,12 @@
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "database-password"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
index 264e4b2de..264e4b2de 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
index b47bdf353..b47bdf353 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
index 6ee999cb1..6ee999cb1 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
index 5c177a7e0..5c177a7e0 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
index b400cfdb3..b400cfdb3 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
index fa67412ff..fa67412ff 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json
index 9d99973be..9d99973be 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json
index 049f3f884..049f3f884 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json
index ab35afead..ab35afead 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json
index c12f06dec..c12f06dec 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json
index 897ce0395..897ce0395 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json
index 97d110286..97d110286 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json
index 56e76016f..56e76016f 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json
index 639ac2e11..639ac2e11 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json
index 22ca3f0a0..22ca3f0a0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json
index e1a585d24..e1a585d24 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json
index 12720eb19..12720eb19 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json
index da8015fb0..da8015fb0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json
index 7d64dac98..7d64dac98 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json
index 1e7c03b99..1e7c03b99 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json
index 07f926ff3..07f926ff3 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json
index 754a3b4c0..754a3b4c0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json
index 8be4ac90b..8be4ac90b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json
index bf9047599..bf9047599 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json
index 51e667e02..51e667e02 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json
index c5f0d006a..c5f0d006a 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json
index 3db0e4c84..3db0e4c84 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json
index 72dbb4302..72dbb4302 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json
index 9dd847451..9dd847451 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json
index 7b1800b7b..7b1800b7b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json
index 31716d84c..31716d84c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json
index 212431056..212431056 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json
index 13fbbdd93..13fbbdd93 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json
index 69fdec206..69fdec206 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json
index 2bd3c249f..2bd3c249f 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json
index 31f245950..31f245950 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json
index eac964697..eac964697 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json
index 09023be71..09023be71 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json
index f08cdf2f9..f08cdf2f9 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json
index 3ca9e9fab..3ca9e9fab 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json
index 83b4d5b24..83b4d5b24 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json
index 1292442a4..1292442a4 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json
index 99db77d58..99db77d58 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json
index c8150c231..c8150c231 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json
index f8e5c2b04..f8e5c2b04 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json
index 1edeb62e7..1edeb62e7 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json
index d11df06ee..d11df06ee 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json
index 6b7f6d707..6b7f6d707 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json
index 811602220..811602220 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json
index 413a6de87..413a6de87 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json
index 610ea9441..610ea9441 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
index 6ef9d6e4c..6ef9d6e4c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
index 9b48f8ae7..9b48f8ae7 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
index 30af703ce..30af703ce 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json
index c2843af63..c2843af63 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
index b8372f374..b8372f374 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
index cd5bb9fa4..cd5bb9fa4 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json
index cb1e49d29..cb1e49d29 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json
index 21d5662c7..21d5662c7 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
index 34657d826..34657d826 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
index 974cfaddb..974cfaddb 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
index 7a8231cc5..7a8231cc5 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json
index cda21f237..cda21f237 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
index 4dfc98015..4dfc98015 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
index f6c85668c..f6c85668c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json
index cd0bec3c1..cd0bec3c1 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json
index 2ecce08a9..2ecce08a9 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json
index d80939efb..d80939efb 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json
index f99099868..f99099868 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json
index 143e16756..143e16756 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
index 1dea463ac..1dea463ac 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json
index 42264585b..42264585b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
index f6d0c99ed..f6d0c99ed 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json
index 41c726cf0..41c726cf0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json
index 170c919cb..170c919cb 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json
index 89d0db1a6..89d0db1a6 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json
index 26cab29f8..26cab29f8 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json
index 32a512829..32a512829 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json
index 55e2199bb..55e2199bb 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json
index 8b3cd6ed0..8b3cd6ed0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json
index bc5bbad22..bc5bbad22 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json
index e54fa0d59..e54fa0d59 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json
index 20ba97dac..20ba97dac 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json
index 555647fab..555647fab 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json
index cf9a4e903..cf9a4e903 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json
index c78a96f7c..c78a96f7c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json
index 620425902..620425902 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json
index 15cfc93fd..15cfc93fd 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json
index c70ee7726..c70ee7726 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json
index fb0578a67..fb0578a67 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json
index dcbb24bf1..dcbb24bf1 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json
index 1768f7a1b..1768f7a1b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json
index 4c2f81f2e..4c2f81f2e 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json
index d8402ef72..d8402ef72 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json
diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml
index 325d2a4e8..97044fff6 100644
--- a/roles/openshift_excluder/tasks/disable.yml
+++ b/roles/openshift_excluder/tasks/disable.yml
@@ -4,6 +4,14 @@
# - docker_excluder_package_state
- include: init.yml
+# unexclude the current openshift/origin-excluder if it is installed so it can be updated
+- include: unexclude.yml
+ vars:
+ unexclude_docker_excluder: false
+ unexclude_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when:
+ - not openshift.common.is_atomic | bool
+
# Install any excluder that is enabled
- include: install.yml
vars:
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 7edf141e5..ca0279426 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -911,7 +911,7 @@ def set_version_facts_if_unset(facts):
version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
- version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0') or version >= LooseVersion('1.6.0')
+ version_gte_3_6 = version >= LooseVersion('3.6.0')
else:
version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
@@ -919,25 +919,26 @@ def set_version_facts_if_unset(facts):
version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
- version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0')
+ version_gte_3_6 = version >= LooseVersion('3.6.0')
else:
+ # 'Latest' version is set to True, 'Next' versions set to False
version_gte_3_1_or_1_1 = True
version_gte_3_1_1_or_1_1_1 = True
version_gte_3_2_or_1_2 = True
version_gte_3_3_or_1_3 = True
version_gte_3_4_or_1_4 = True
version_gte_3_5_or_1_5 = True
- version_gte_3_6_or_1_6 = False
+ version_gte_3_6 = True
facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
- facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6
+ facts['common']['version_gte_3_6'] = version_gte_3_6
- if version_gte_3_6_or_1_6:
- examples_content_version = 'v1.6'
+ if version_gte_3_6:
+ examples_content_version = 'v3.6'
elif version_gte_3_5_or_1_5:
examples_content_version = 'v1.5'
elif version_gte_3_4_or_1_4:
@@ -2155,6 +2156,10 @@ class OpenShiftFacts(object):
nfs=dict(
directory='/exports',
options='*(rw,root_squash)'),
+ glusterfs=dict(
+ endpoints='glusterfs-registry-endpoints',
+ path='glusterfs-registry-volume',
+ readOnly=False),
host=None,
access=dict(
modes=['ReadWriteMany']
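
The openshift_facts hunks above fold the dual-numbered version_gte_3_6_or_1_6 fact into a single version_gte_3_6 flag and point the examples content at the renamed v3.6 directory, matching the file moves earlier in this diff; they also add default glusterfs registry-storage facts. A minimal sketch of the LooseVersion comparison pattern the fact relies on (illustrative values only, not the module's full origin/enterprise branching):

    from distutils.version import LooseVersion

    # Same comparison style as set_version_facts_if_unset(), greatly simplified:
    # only the v3.6 / v1.5 examples branches are shown here.
    for reported in ('1.5.1', '3.5.0', '3.6.0', '3.6.173.0.5'):
        version = LooseVersion(reported)
        version_gte_3_6 = version >= LooseVersion('3.6.0')
        examples_content_version = 'v3.6' if version_gte_3_6 else 'v1.5'
        print(reported, version_gte_3_6, examples_content_version)
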
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index cf0fe19f1..03c40b78b 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -4,6 +4,7 @@ Ansible action plugin to execute health checks in OpenShift clusters.
# pylint: disable=wrong-import-position,missing-docstring,invalid-name
import sys
import os
+from collections import defaultdict
try:
from __main__ import display
@@ -41,20 +42,11 @@ class ActionModule(ActionBase):
return result
args = self._task.args
- requested_checks = resolve_checks(args.get("checks", []), known_checks.values())
-
- unknown_checks = requested_checks - set(known_checks)
- if unknown_checks:
- result["failed"] = True
- result["msg"] = (
- "One or more checks are unknown: {}. "
- "Make sure there is no typo in the playbook and no files are missing."
- ).format(", ".join(unknown_checks))
- return result
+ resolved_checks = resolve_checks(args.get("checks", []), known_checks.values())
result["checks"] = check_results = {}
- for check_name in requested_checks & set(known_checks):
+ for check_name in resolved_checks:
display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
check = known_checks[check_name]
@@ -81,10 +73,7 @@ class ActionModule(ActionBase):
load_checks()
known_checks = {}
-
- known_check_classes = set(cls for cls in OpenShiftCheck.subclasses())
-
- for cls in known_check_classes:
+ for cls in OpenShiftCheck.subclasses():
check_name = cls.name
if check_name in known_checks:
other_cls = known_checks[check_name].__class__
@@ -94,26 +83,45 @@ class ActionModule(ActionBase):
cls.__module__, cls.__name__,
other_cls.__module__, other_cls.__name__))
known_checks[check_name] = cls(execute_module=self._execute_module)
-
return known_checks
def resolve_checks(names, all_checks):
"""Returns a set of resolved check names.
- Resolving a check name involves expanding tag references (e.g., '@tag') with
- all the checks that contain the given tag.
+ Resolving a check name expands tag references (e.g., "@tag") to all the
+ checks that contain the given tag. OpenShiftCheckException is raised if
+ names contains an unknown check or tag name.
names should be a sequence of strings.
all_checks should be a sequence of check classes/instances.
"""
- resolved = set()
- for name in names:
- if name.startswith("@"):
- for check in all_checks:
- if name[1:] in check.tags:
- resolved.add(check.name)
- else:
- resolved.add(name)
+ known_check_names = set(check.name for check in all_checks)
+ known_tag_names = set(name for check in all_checks for name in check.tags)
+
+ check_names = set(name for name in names if not name.startswith('@'))
+ tag_names = set(name[1:] for name in names if name.startswith('@'))
+
+ unknown_check_names = check_names - known_check_names
+ unknown_tag_names = tag_names - known_tag_names
+
+ if unknown_check_names or unknown_tag_names:
+ msg = []
+ if unknown_check_names:
+ msg.append('Unknown check names: {}.'.format(', '.join(sorted(unknown_check_names))))
+ if unknown_tag_names:
+ msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names))))
+ msg.append('Make sure there is no typo in the playbook and no files are missing.')
+ raise OpenShiftCheckException('\n'.join(msg))
+
+ tag_to_checks = defaultdict(set)
+ for check in all_checks:
+ for tag in check.tags:
+ tag_to_checks[tag].add(check.name)
+
+ resolved = check_names.copy()
+ for tag in tag_names:
+ resolved.update(tag_to_checks[tag])
+
return resolved
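
For illustration, a minimal sketch of how the rewritten resolve_checks behaves; FakeCheck and the check/tag names below are invented stand-ins for real OpenShiftCheck subclasses (the same pattern the unit tests added in this patch use):

class FakeCheck(object):
    # Stand-in exposing just the attributes resolve_checks reads: name and tags.
    def __init__(self, name, tags):
        self.name, self.tags = name, tags

checks = [FakeCheck('disk_availability', ['preflight']),
          FakeCheck('package_version', ['preflight'])]

resolve_checks(['disk_availability'], checks)  # -> set(['disk_availability'])
resolve_checks(['@preflight'], checks)         # -> set(['disk_availability', 'package_version'])
resolve_checks(['@no_such_tag'], checks)       # raises OpenShiftCheckException naming the unknown tag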
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index 191a4b107..a46589443 100755
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -1,91 +1,199 @@
#!/usr/bin/python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
-Ansible module for determining if multiple versions of an OpenShift package are
-available, and if the version requested is available down to the given
-precision.
-
-Multiple versions available suggest that multiple repos are enabled for the
-different versions, which may cause installation problems.
+Ansible module (for yum-based systems) that determines whether multiple
+releases of an OpenShift package are available, and whether the requested
+release (if any) is available down to the given precision.
+
+For Enterprise, multiple available releases suggest that repos for several
+different releases are enabled, which may cause installation problems. With
+Origin, however, this is a normal state of affairs, as all releases are
+provided in a single repo with the expectation that only the latest can be
+installed.
+
+Code in the openshift_version role contains a lot of logic to pin down
+the exact package and image version to use and so does some validation
+of release availability already. Without duplicating all that, we would
+like the user to have a helpful error message if we detect things will
+not work out right. Note that if openshift_release is not specified in
+the inventory, the version comparison checks just pass.
+
+TODO: fail gracefully on non-yum systems (dnf in Fedora)
'''
-import yum # pylint: disable=import-error
-
from ansible.module_utils.basic import AnsibleModule
+IMPORT_EXCEPTION = None
+try:
+ import yum # pylint: disable=import-error
+except ImportError as err:
+ IMPORT_EXCEPTION = err # in tox test env, yum import fails
+
-def main(): # pylint: disable=missing-docstring,too-many-branches
+class AosVersionException(Exception):
+ '''Base exception class for package version problems'''
+ def __init__(self, message, problem_pkgs=None):
+ Exception.__init__(self, message)
+ self.problem_pkgs = problem_pkgs
+
+
+def main():
+ '''Entrypoint for this Ansible module'''
module = AnsibleModule(
argument_spec=dict(
- prefix=dict(required=True), # atomic-openshift, origin, ...
- version=dict(required=True),
+ requested_openshift_release=dict(type="str", default=''),
+ openshift_deployment_type=dict(required=True),
+ rpm_prefix=dict(required=True), # atomic-openshift, origin, ...?
),
supports_check_mode=True
)
- def bail(error): # pylint: disable=missing-docstring
- module.fail_json(msg=error)
-
- rpm_prefix = module.params['prefix']
+ if IMPORT_EXCEPTION:
+ module.fail_json(msg="aos_version module could not import yum: %s" % IMPORT_EXCEPTION)
+ # determine the packages we will look for
+ rpm_prefix = module.params['rpm_prefix']
if not rpm_prefix:
- bail("prefix must not be empty")
-
- yb = yum.YumBase() # pylint: disable=invalid-name
- yb.conf.disable_excludes = ["all"] # assume the openshift excluder will be managed, ignore current state
-
- # search for package versions available for aos pkgs
- expected_pkgs = [
+ module.fail_json(msg="rpm_prefix must not be empty")
+ expected_pkgs = set([
rpm_prefix,
rpm_prefix + '-master',
rpm_prefix + '-node',
- ]
+ ])
+
+ # determine what level of precision the user specified for the openshift version.
+ # should look like a version string with possibly many segments e.g. "3.4.1":
+ requested_openshift_release = module.params['requested_openshift_release']
+
+ # get the list of packages available and complain if anything is wrong
+ try:
+ pkgs = _retrieve_available_packages(expected_pkgs)
+ if requested_openshift_release:
+ _check_precise_version_found(pkgs, expected_pkgs, requested_openshift_release)
+ _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release)
+ if module.params['openshift_deployment_type'] in ['openshift-enterprise']:
+ _check_multi_minor_release(pkgs, expected_pkgs)
+ except AosVersionException as excinfo:
+ module.fail_json(msg=str(excinfo))
+ module.exit_json(changed=False)
+
+
+def _retrieve_available_packages(expected_pkgs):
+ # search for package versions available for openshift pkgs
+ yb = yum.YumBase() # pylint: disable=invalid-name
+
+ # The openshift excluder prevents unintended updates to openshift
+ # packages by setting yum excludes on those packages. See:
+ # https://wiki.centos.org/SpecialInterestGroup/PaaS/OpenShift-Origin-Control-Updates
+ # Excludes are then disabled during an install or upgrade, but
+ # this check will most likely be running outside either. When we
+ # attempt to determine what packages are available via yum they may
+ # be excluded. So, for our purposes here, disable excludes to see
+ # what will really be available during an install or upgrade.
+ yb.conf.disable_excludes = ['all']
+
try:
pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs)
- except yum.Errors.PackageSackError as e: # pylint: disable=invalid-name
+ except yum.Errors.PackageSackError as excinfo:
# you only hit this if *none* of the packages are available
- bail('Unable to find any OpenShift packages.\nCheck your subscription and repo settings.\n%s' % e)
+ raise AosVersionException('\n'.join([
+ 'Unable to find any OpenShift packages.',
+ 'Check your subscription and repo settings.',
+ str(excinfo),
+ ]))
+ return pkgs
- # determine what level of precision we're expecting for the version
- expected_version = module.params['version']
- if expected_version.startswith('v'): # v3.3 => 3.3
- expected_version = expected_version[1:]
- num_dots = expected_version.count('.')
- pkgs_by_name_version = {}
+class PreciseVersionNotFound(AosVersionException):
+ '''Exception for reporting packages not available at given release'''
+ def __init__(self, requested_release, not_found):
+ msg = ['Not all of the required packages are available at requested version %s:' % requested_release]
+ msg += [' ' + name for name in not_found]
+ msg += ['Please check your subscriptions and enabled repositories.']
+ AosVersionException.__init__(self, '\n'.join(msg), not_found)
+
+
+def _check_precise_version_found(pkgs, expected_pkgs, requested_openshift_release):
+ # see if any packages couldn't be found at requested release version
+    # verify that packages are available at whatever precision the requested release specifies;
+    # e.g. if 3.4.1 is requested and a 3.4.1.5 package is available the check passes; if only 3.4.0 is, it fails.
+
pkgs_precise_version_found = {}
for pkg in pkgs:
- # get expected version precision
- match_version = '.'.join(pkg.version.split('.')[:num_dots + 1])
- if match_version == expected_version:
+ if pkg.name not in expected_pkgs:
+ continue
+        # does the version match, to the precision requested?
+        # (strictly higher versions are reported separately by _check_higher_version_found)
+ match_version = '.'.join(pkg.version.split('.')[:requested_openshift_release.count('.') + 1])
+ if match_version == requested_openshift_release:
pkgs_precise_version_found[pkg.name] = True
- # get x.y version precision
- minor_version = '.'.join(pkg.version.split('.')[:2])
- if pkg.name not in pkgs_by_name_version:
- pkgs_by_name_version[pkg.name] = {}
- pkgs_by_name_version[pkg.name][minor_version] = True
- # see if any packages couldn't be found at requested version
- # see if any packages are available in more than one minor version
not_found = []
- multi_found = []
for name in expected_pkgs:
if name not in pkgs_precise_version_found:
not_found.append(name)
+
+ if not_found:
+ raise PreciseVersionNotFound(requested_openshift_release, not_found)
+
+
+class FoundHigherVersion(AosVersionException):
+ '''Exception for reporting that a higher version than requested is available'''
+ def __init__(self, requested_release, higher_found):
+ msg = ['Some required package(s) are available at a version',
+ 'that is higher than requested %s:' % requested_release]
+ msg += [' ' + name for name in higher_found]
+ msg += ['This will prevent installing the version you requested.']
+ msg += ['Please check your enabled repositories or adjust openshift_release.']
+ AosVersionException.__init__(self, '\n'.join(msg), higher_found)
+
+
+def _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release):
+ req_release_arr = [int(segment) for segment in requested_openshift_release.split(".")]
+ # see if any packages are available in a version higher than requested
+ higher_version_for_pkg = {}
+ for pkg in pkgs:
+ if pkg.name not in expected_pkgs:
+ continue
+ version = [int(segment) for segment in pkg.version.split(".")]
+ too_high = version[:len(req_release_arr)] > req_release_arr
+ higher_than_seen = version > higher_version_for_pkg.get(pkg.name, [])
+ if too_high and higher_than_seen:
+ higher_version_for_pkg[pkg.name] = version
+
+ if higher_version_for_pkg:
+ higher_found = []
+ for name, version in higher_version_for_pkg.items():
+ higher_found.append(name + '-' + '.'.join(str(segment) for segment in version))
+ raise FoundHigherVersion(requested_openshift_release, higher_found)
+
+
+class FoundMultiRelease(AosVersionException):
+ '''Exception for reporting multiple minor releases found for same package'''
+ def __init__(self, multi_found):
+ msg = ['Multiple minor versions of these packages are available']
+ msg += [' ' + name for name in multi_found]
+ msg += ["There should only be one OpenShift release repository enabled at a time."]
+ AosVersionException.__init__(self, '\n'.join(msg), multi_found)
+
+
+def _check_multi_minor_release(pkgs, expected_pkgs):
+ # see if any packages are available in more than one minor version
+ pkgs_by_name_version = {}
+ for pkg in pkgs:
+ # keep track of x.y (minor release) versions seen
+ minor_release = '.'.join(pkg.version.split('.')[:2])
+ if pkg.name not in pkgs_by_name_version:
+ pkgs_by_name_version[pkg.name] = {}
+ pkgs_by_name_version[pkg.name][minor_release] = True
+
+ multi_found = []
+ for name in expected_pkgs:
if name in pkgs_by_name_version and len(pkgs_by_name_version[name]) > 1:
multi_found.append(name)
- if not_found:
- msg = 'Not all of the required packages are available at requested version %s:\n' % expected_version
- for name in not_found:
- msg += ' %s\n' % name
- bail(msg + 'Please check your subscriptions and enabled repositories.')
- if multi_found:
- msg = 'Multiple minor versions of these packages are available\n'
- for name in multi_found:
- msg += ' %s\n' % name
- bail(msg + "There should only be one OpenShift version's repository enabled at a time.")
- module.exit_json(changed=False)
+ if multi_found:
+ raise FoundMultiRelease(multi_found)
if __name__ == '__main__':
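
As a rough worked example of the precision matching in _check_precise_version_found (the version strings here are made up): each candidate package version is truncated to the same number of segments as the requested release before comparing.

requested = '3.4.1'   # three segments requested
for version in ['3.4.1.5', '3.4.0', '3.3.2']:
    truncated = '.'.join(version.split('.')[:requested.count('.') + 1])
    # '3.4.1.5' -> '3.4.1' (match), '3.4.0' -> '3.4.0' (no match), '3.3.2' -> '3.3.2' (no match)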
diff --git a/roles/openshift_health_checker/meta/main.yml b/roles/openshift_health_checker/meta/main.yml
index 0bbeadd34..cd9b55902 100644
--- a/roles/openshift_health_checker/meta/main.yml
+++ b/roles/openshift_health_checker/meta/main.yml
@@ -1,3 +1,4 @@
---
dependencies:
- role: openshift_facts
+ - role: openshift_repos
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
new file mode 100644
index 000000000..c2792a0fe
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -0,0 +1,65 @@
+# pylint: disable=missing-docstring
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks.mixins import NotContainerizedMixin
+
+
+class DiskAvailability(NotContainerizedMixin, OpenShiftCheck):
+ """Check that recommended disk space is available before a first-time install."""
+
+ name = "disk_availability"
+ tags = ["preflight"]
+
+ # Values taken from the official installation documentation:
+ # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
+ recommended_disk_space_bytes = {
+ "masters": 40 * 10**9,
+ "nodes": 15 * 10**9,
+ "etcd": 20 * 10**9,
+ }
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have recommended disk space requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ has_disk_space_recommendation = bool(set(group_names).intersection(cls.recommended_disk_space_bytes))
+ return super(DiskAvailability, cls).is_active(task_vars) and has_disk_space_recommendation
+
+ def run(self, tmp, task_vars):
+ group_names = get_var(task_vars, "group_names")
+ ansible_mounts = get_var(task_vars, "ansible_mounts")
+
+ min_free_bytes = max(self.recommended_disk_space_bytes.get(name, 0) for name in group_names)
+ free_bytes = self.openshift_available_disk(ansible_mounts)
+
+ if free_bytes < min_free_bytes:
+ return {
+ 'failed': True,
+ 'msg': (
+ 'Available disk space ({:.1f} GB) for the volume containing '
+ '"/var" is below minimum recommended space ({:.1f} GB)'
+ ).format(float(free_bytes) / 10**9, float(min_free_bytes) / 10**9)
+ }
+
+ return {}
+
+ @staticmethod
+ def openshift_available_disk(ansible_mounts):
+ """Determine the available disk space for an OpenShift installation.
+
+ ansible_mounts should be a list of dicts like the 'setup' Ansible module
+ returns.
+ """
+        # priority list in descending order
+        supported_mnt_paths = ["/var", "/"]
+        available_mnts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
+
+        try:
+            for path in supported_mnt_paths:
+                if path in available_mnts:
+                    return available_mnts[path]["size_available"]
+        except KeyError:
+            pass
+
+        paths = ', '.join(sorted(available_mnts)) or 'none'
+        msg = "Unable to determine available disk space. Paths mounted: {}.".format(paths)
+        raise OpenShiftCheckException(msg)
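
A minimal sketch of the mount-path priority in openshift_available_disk, using made-up mount facts; /var is consulted before /:

mounts = [
    {'mount': '/', 'size_available': 10 * 10**9},
    {'mount': '/var', 'size_available': 50 * 10**9},
]
print(DiskAvailability.openshift_available_disk(mounts))   # 50000000000, the /var figure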
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
new file mode 100644
index 000000000..28805dc37
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -0,0 +1,44 @@
+# pylint: disable=missing-docstring
+from openshift_checks import OpenShiftCheck, get_var
+
+
+class MemoryAvailability(OpenShiftCheck):
+ """Check that recommended memory is available."""
+
+ name = "memory_availability"
+ tags = ["preflight"]
+
+ # Values taken from the official installation documentation:
+ # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
+ recommended_memory_bytes = {
+ "masters": 16 * 10**9,
+ "nodes": 8 * 10**9,
+ "etcd": 20 * 10**9,
+ }
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have recommended memory requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ has_memory_recommendation = bool(set(group_names).intersection(cls.recommended_memory_bytes))
+ return super(MemoryAvailability, cls).is_active(task_vars) and has_memory_recommendation
+
+ def run(self, tmp, task_vars):
+ group_names = get_var(task_vars, "group_names")
+ total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * 10**6
+
+ min_memory_bytes = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
+
+ if total_memory_bytes < min_memory_bytes:
+ return {
+ 'failed': True,
+ 'msg': (
+ 'Available memory ({available:.1f} GB) '
+ 'below recommended value ({recommended:.1f} GB)'
+ ).format(
+ available=float(total_memory_bytes) / 10**9,
+ recommended=float(min_memory_bytes) / 10**9,
+ ),
+ }
+
+ return {}
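
A quick sketch of the unit conversion in run(), mirroring the unit tests added later in this patch (the values are arbitrary); ansible_memtotal_mb is scaled to bytes before comparing against the recommendation:

task_vars = dict(group_names=['masters'], ansible_memtotal_mb=16200)
check = MemoryAvailability(execute_module=None)
print(check.run(tmp=None, task_vars=task_vars))   # {} (16200 MB is about 16.2 GB, above the 16 GB master recommendation)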
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 657e15160..20d160eaf 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -1,4 +1,8 @@
-# pylint: disable=missing-docstring
+# pylint: disable=missing-docstring,too-few-public-methods
+"""
+Mixin classes meant to be used with subclasses of OpenShiftCheck.
+"""
+
from openshift_checks import get_var
@@ -7,12 +11,5 @@ class NotContainerizedMixin(object):
@classmethod
def is_active(cls, task_vars):
- return (
- # This mixin is meant to be used with subclasses of OpenShiftCheck.
- super(NotContainerizedMixin, cls).is_active(task_vars) and
- not cls.is_containerized(task_vars)
- )
-
- @staticmethod
- def is_containerized(task_vars):
- return get_var(task_vars, "openshift", "common", "is_containerized")
+ is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+ return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 9891972a6..a7eb720fd 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -9,6 +9,10 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
name = "package_availability"
tags = ["preflight"]
+ @classmethod
+ def is_active(cls, task_vars):
+ return super(PackageAvailability, cls).is_active(task_vars) and task_vars["ansible_pkg_mgr"] == "yum"
+
def run(self, tmp, task_vars):
rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
group_names = get_var(task_vars, "group_names", default=[])
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 42193a1c6..682f6bd40 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -9,12 +9,17 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
name = "package_version"
tags = ["preflight"]
- def run(self, tmp, task_vars):
- rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
- openshift_release = get_var(task_vars, "openshift_release")
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have package requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ master_or_node = 'masters' in group_names or 'nodes' in group_names
+ return super(PackageVersion, cls).is_active(task_vars) and master_or_node
+ def run(self, tmp, task_vars):
args = {
- "prefix": rpm_prefix,
- "version": openshift_release,
+ "requested_openshift_release": get_var(task_vars, "openshift_release", default=''),
+ "openshift_deployment_type": get_var(task_vars, "openshift_deployment_type"),
+ "rpm_prefix": get_var(task_vars, "openshift", "common", "service_type"),
}
return self.execute_module("aos_version", args, tmp, task_vars)
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
new file mode 100644
index 000000000..2693ae37b
--- /dev/null
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -0,0 +1,229 @@
+import pytest
+
+from ansible.playbook.play_context import PlayContext
+
+from openshift_health_check import ActionModule, resolve_checks
+from openshift_checks import OpenShiftCheckException
+
+
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None):
+ """Returns a new class that is compatible with OpenShiftCheck for testing."""
+
+ _name, _tags = name, tags
+
+ class FakeCheck(object):
+ name = _name
+ tags = _tags or []
+
+ def __init__(self, execute_module=None):
+ pass
+
+ @classmethod
+ def is_active(cls, task_vars):
+ return is_active
+
+ def run(self, tmp, task_vars):
+ if run_exception is not None:
+ raise run_exception
+ return run_return
+
+ return FakeCheck
+
+
+# Fixtures
+
+
+@pytest.fixture
+def plugin():
+ task = FakeTask('openshift_health_check', {'checks': ['fake_check']})
+ plugin = ActionModule(task, None, PlayContext(), None, None, None)
+ return plugin
+
+
+class FakeTask(object):
+ def __init__(self, action, args):
+ self.action = action
+ self.args = args
+ self.async = 0
+
+
+@pytest.fixture
+def task_vars():
+ return dict(openshift=dict(), ansible_host='unit-test-host')
+
+
+# Assertion helpers
+
+
+def failed(result, msg_has=None):
+ if msg_has is not None:
+ assert 'msg' in result
+ for term in msg_has:
+ assert term in result['msg']
+ return result.get('failed', False)
+
+
+def changed(result):
+ return result.get('changed', False)
+
+
+def skipped(result):
+ return result.get('skipped', False)
+
+
+# Tests
+
+
+@pytest.mark.parametrize('task_vars', [
+ None,
+ {},
+])
+def test_action_plugin_missing_openshift_facts(plugin, task_vars):
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert failed(result, msg_has=['openshift_facts'])
+
+
+def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars, monkeypatch):
+ FakeCheck1 = fake_check('duplicate_name')
+ FakeCheck2 = fake_check('duplicate_name')
+ checks = [FakeCheck1, FakeCheck2]
+ monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert failed(result, msg_has=['unique', 'duplicate_name', 'FakeCheck'])
+
+
+def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
+ checks = [fake_check(is_active=False)]
+ monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == {'skipped': True}
+ assert not failed(result)
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
+ check_return_value = {'ok': 'test'}
+ check_class = fake_check(run_return=check_return_value)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == check_return_value
+ assert not failed(result)
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
+ check_return_value = {'ok': 'test', 'changed': True}
+ check_class = fake_check(run_return=check_return_value)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == check_return_value
+ assert not failed(result)
+ assert changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
+ check_return_value = {'failed': True}
+ check_class = fake_check(run_return=check_return_value)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == check_return_value
+ assert failed(result, msg_has=['failed'])
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
+ exception_msg = 'fake check has an exception'
+ run_exception = OpenShiftCheckException(exception_msg)
+ check_class = fake_check(run_exception=run_exception)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert failed(result['checks']['fake_check'], msg_has=exception_msg)
+ assert failed(result, msg_has=['failed'])
+ assert not changed(result)
+ assert not skipped(result)
+
+
+@pytest.mark.parametrize('names,all_checks,expected', [
+ ([], [], set()),
+ (
+ ['a', 'b'],
+ [
+ fake_check('a'),
+ fake_check('b'),
+ ],
+ set(['a', 'b']),
+ ),
+ (
+ ['a', 'b', '@group'],
+ [
+ fake_check('from_group_1', ['group', 'another_group']),
+ fake_check('not_in_group', ['another_group']),
+ fake_check('from_group_2', ['preflight', 'group']),
+ fake_check('a'),
+ fake_check('b'),
+ ],
+ set(['a', 'b', 'from_group_1', 'from_group_2']),
+ ),
+])
+def test_resolve_checks_ok(names, all_checks, expected):
+ assert resolve_checks(names, all_checks) == expected
+
+
+@pytest.mark.parametrize('names,all_checks,words_in_exception,words_not_in_exception', [
+ (
+ ['testA', 'testB'],
+ [],
+ ['check', 'name', 'testA', 'testB'],
+ ['tag', 'group', '@'],
+ ),
+ (
+ ['@group'],
+ [],
+ ['tag', 'name', 'group'],
+ ['check', '@'],
+ ),
+ (
+ ['testA', 'testB', '@group'],
+ [],
+ ['check', 'name', 'testA', 'testB', 'tag', 'group'],
+ ['@'],
+ ),
+ (
+ ['testA', 'testB', '@group'],
+ [
+ fake_check('from_group_1', ['group', 'another_group']),
+ fake_check('not_in_group', ['another_group']),
+ fake_check('from_group_2', ['preflight', 'group']),
+ ],
+ ['check', 'name', 'testA', 'testB'],
+ ['tag', 'group', '@'],
+ ),
+])
+def test_resolve_checks_failure(names, all_checks, words_in_exception, words_not_in_exception):
+ with pytest.raises(Exception) as excinfo:
+ resolve_checks(names, all_checks)
+ for word in words_in_exception:
+ assert word in str(excinfo.value)
+ for word in words_not_in_exception:
+ assert word not in str(excinfo.value)
diff --git a/roles/openshift_health_checker/test/aos_version_test.py b/roles/openshift_health_checker/test/aos_version_test.py
new file mode 100644
index 000000000..39c86067a
--- /dev/null
+++ b/roles/openshift_health_checker/test/aos_version_test.py
@@ -0,0 +1,120 @@
+import pytest
+import aos_version
+
+from collections import namedtuple
+Package = namedtuple('Package', ['name', 'version'])
+
+expected_pkgs = set(['spam', 'eggs'])
+
+
+@pytest.mark.parametrize('pkgs, requested_release, expect_not_found', [
+ (
+ [],
+ '3.2.1',
+ expected_pkgs, # none found
+ ),
+ (
+ [Package('spam', '3.2.1')],
+ '3.2',
+ ['eggs'], # completely missing
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
+ '3.2',
+ ['eggs'], # not the right version
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
+ '3.2',
+ [], # all found
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
+ '3.2.1',
+ [], # found with more specific version
+ ),
+ (
+ [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
+ '3.2.1',
+ ['spam'], # eggs found with multiple versions
+ ),
+])
+def test_check_pkgs_for_precise_version(pkgs, requested_release, expect_not_found):
+ if expect_not_found:
+ with pytest.raises(aos_version.PreciseVersionNotFound) as e:
+ aos_version._check_precise_version_found(pkgs, expected_pkgs, requested_release)
+ assert set(expect_not_found) == set(e.value.problem_pkgs)
+ else:
+ aos_version._check_precise_version_found(pkgs, expected_pkgs, requested_release)
+
+
+@pytest.mark.parametrize('pkgs, requested_release, expect_higher', [
+ (
+ [],
+ '3.2.1',
+ [],
+ ),
+ (
+ [Package('spam', '3.2.1')],
+ '3.2',
+ [], # more precise but not strictly higher
+ ),
+ (
+ [Package('spam', '3.3')],
+ '3.2.1',
+ ['spam-3.3'], # lower precision, but higher
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
+ '3.2',
+ ['eggs-3.3.2'], # one too high
+ ),
+ (
+ [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
+ '3.2.1',
+ ['eggs-3.4'], # multiple versions, one is higher
+ ),
+ (
+ [Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
+ '3.2.1',
+ ['eggs-3.4'], # multiple versions, two are higher
+ ),
+])
+def test_check_pkgs_for_greater_version(pkgs, requested_release, expect_higher):
+ if expect_higher:
+ with pytest.raises(aos_version.FoundHigherVersion) as e:
+ aos_version._check_higher_version_found(pkgs, expected_pkgs, requested_release)
+ assert set(expect_higher) == set(e.value.problem_pkgs)
+ else:
+ aos_version._check_higher_version_found(pkgs, expected_pkgs, requested_release)
+
+
+@pytest.mark.parametrize('pkgs, expect_to_flag_pkgs', [
+ (
+ [],
+ [],
+ ),
+ (
+ [Package('spam', '3.2.1')],
+ [],
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
+ [],
+ ),
+ (
+ [Package('spam', '3.2.1'), Package('spam', '3.3.2')],
+ ['spam'],
+ ),
+ (
+ [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
+ ['eggs'],
+ ),
+])
+def test_check_pkgs_for_multi_release(pkgs, expect_to_flag_pkgs):
+ if expect_to_flag_pkgs:
+ with pytest.raises(aos_version.FoundMultiRelease) as e:
+ aos_version._check_multi_minor_release(pkgs, expected_pkgs)
+ assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs)
+ else:
+ aos_version._check_multi_minor_release(pkgs, expected_pkgs)
diff --git a/roles/openshift_health_checker/test/conftest.py b/roles/openshift_health_checker/test/conftest.py
index bf717ae85..3cbd65507 100644
--- a/roles/openshift_health_checker/test/conftest.py
+++ b/roles/openshift_health_checker/test/conftest.py
@@ -1,5 +1,11 @@
import os
import sys
-# extend sys.path so that tests can import openshift_checks
-sys.path.insert(1, os.path.dirname(os.path.dirname(__file__)))
+# extend sys.path so that tests can import openshift_checks and action plugins
+# from this role.
+openshift_health_checker_path = os.path.dirname(os.path.dirname(__file__))
+sys.path[1:1] = [
+ openshift_health_checker_path,
+ os.path.join(openshift_health_checker_path, 'action_plugins'),
+ os.path.join(openshift_health_checker_path, 'library'),
+]
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
new file mode 100644
index 000000000..970b474d7
--- /dev/null
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -0,0 +1,155 @@
+import pytest
+
+from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckException
+
+
+@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+ (['masters'], False, True),
+ # ensure check is skipped on containerized installs
+ (['masters'], True, False),
+ (['nodes'], False, True),
+ (['etcd'], False, True),
+ (['masters', 'nodes'], False, True),
+ (['masters', 'etcd'], False, True),
+ ([], False, False),
+ (['lb'], False, False),
+ (['nfs'], False, False),
+])
+def test_is_active(group_names, is_containerized, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert DiskAvailability.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize('ansible_mounts,extra_words', [
+ ([], ['none']), # empty ansible_mounts
+ ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
+ ([{'mount': '/var'}], ['/var']), # missing size_available
+])
+def test_cannot_determine_available_disk(ansible_mounts, extra_words):
+ task_vars = dict(
+ group_names=['masters'],
+ ansible_mounts=ansible_mounts,
+ )
+ check = DiskAvailability(execute_module=fake_execute_module)
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+
+ for word in 'determine available disk'.split() + extra_words:
+ assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('group_names,ansible_mounts', [
+ (
+ ['masters'],
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['nodes'],
+ [{
+ 'mount': '/',
+ 'size_available': 15 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['etcd'],
+ [{
+ 'mount': '/',
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['etcd'],
+ [{
+ # not enough space on / ...
+ 'mount': '/',
+ 'size_available': 0,
+ }, {
+ # ... but enough on /var
+ 'mount': '/var',
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ ),
+])
+def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_mounts=ansible_mounts,
+ )
+
+ check = DiskAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('group_names,ansible_mounts,extra_words', [
+ (
+ ['masters'],
+ [{
+ 'mount': '/',
+ 'size_available': 1,
+ }],
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes'],
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**9,
+ }],
+ ['1.0 GB'],
+ ),
+ (
+ ['etcd'],
+ [{
+ 'mount': '/',
+ 'size_available': 1,
+ }],
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes', 'masters'],
+ [{
+ 'mount': '/',
+ # enough space for a node, not enough for a master
+ 'size_available': 15 * 10**9 + 1,
+ }],
+ ['15.0 GB'],
+ ),
+ (
+ ['etcd'],
+ [{
+ # enough space on / ...
+ 'mount': '/',
+ 'size_available': 20 * 10**9 + 1,
+ }, {
+            # ... but not enough on /var
+ 'mount': '/var',
+ 'size_available': 0,
+ }],
+ ['0.0 GB'],
+ ),
+])
+def test_fails_with_insufficient_disk_space(group_names, ansible_mounts, extra_words):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_mounts=ansible_mounts,
+ )
+
+ check = DiskAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert result['failed']
+ for word in 'below recommended'.split() + extra_words:
+ assert word in result['msg']
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
new file mode 100644
index 000000000..e161a5b9e
--- /dev/null
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -0,0 +1,91 @@
+import pytest
+
+from openshift_checks.memory_availability import MemoryAvailability
+
+
+@pytest.mark.parametrize('group_names,is_active', [
+ (['masters'], True),
+ (['nodes'], True),
+ (['etcd'], True),
+ (['masters', 'nodes'], True),
+ (['masters', 'etcd'], True),
+ ([], False),
+ (['lb'], False),
+ (['nfs'], False),
+])
+def test_is_active(group_names, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ )
+ assert MemoryAvailability.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize('group_names,ansible_memtotal_mb', [
+ (
+ ['masters'],
+ 17200,
+ ),
+ (
+ ['nodes'],
+ 8200,
+ ),
+ (
+ ['etcd'],
+ 22200,
+ ),
+ (
+ ['masters', 'nodes'],
+ 17000,
+ ),
+])
+def test_succeeds_with_recommended_memory(group_names, ansible_memtotal_mb):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_memtotal_mb=ansible_memtotal_mb,
+ )
+
+ check = MemoryAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('group_names,ansible_memtotal_mb,extra_words', [
+ (
+ ['masters'],
+ 0,
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes'],
+ 100,
+ ['0.1 GB'],
+ ),
+ (
+ ['etcd'],
+ -1,
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes', 'masters'],
+ # enough memory for a node, not enough for a master
+ 11000,
+ ['11.0 GB'],
+ ),
+])
+def test_fails_with_insufficient_memory(group_names, ansible_memtotal_mb, extra_words):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_memtotal_mb=ansible_memtotal_mb,
+ )
+
+ check = MemoryAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert result['failed']
+ for word in 'below recommended'.split() + extra_words:
+ assert word in result['msg']
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 25385339a..f7e916a46 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -3,6 +3,20 @@ import pytest
from openshift_checks.package_availability import PackageAvailability
+@pytest.mark.parametrize('pkg_mgr,is_containerized,is_active', [
+ ('yum', False, True),
+ ('yum', True, False),
+ ('dnf', True, False),
+ ('dnf', False, False),
+])
+def test_is_active(pkg_mgr, is_containerized, is_active):
+ task_vars = dict(
+ ansible_pkg_mgr=pkg_mgr,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert PackageAvailability.is_active(task_vars=task_vars) == is_active
+
+
@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
(
dict(openshift=dict(common=dict(service_type='openshift'))),
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index cc1d263bc..196d9816a 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -1,21 +1,46 @@
+import pytest
+
from openshift_checks.package_version import PackageVersion
def test_package_version():
task_vars = dict(
openshift=dict(common=dict(service_type='origin')),
- openshift_release='v3.5',
+ openshift_release='3.5',
+ openshift_deployment_type='origin',
)
return_value = object()
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
assert module_name == 'aos_version'
- assert 'prefix' in module_args
- assert 'version' in module_args
- assert module_args['prefix'] == task_vars['openshift']['common']['service_type']
- assert module_args['version'] == task_vars['openshift_release']
+ assert 'requested_openshift_release' in module_args
+ assert 'openshift_deployment_type' in module_args
+ assert 'rpm_prefix' in module_args
+ assert module_args['requested_openshift_release'] == task_vars['openshift_release']
+ assert module_args['openshift_deployment_type'] == task_vars['openshift_deployment_type']
+ assert module_args['rpm_prefix'] == task_vars['openshift']['common']['service_type']
return return_value
check = PackageVersion(execute_module=execute_module)
result = check.run(tmp=None, task_vars=task_vars)
assert result is return_value
+
+
+@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+ (['masters'], False, True),
+ # ensure check is skipped on containerized installs
+ (['masters'], True, False),
+ (['nodes'], False, True),
+ (['masters', 'nodes'], False, True),
+ (['masters', 'etcd'], False, True),
+ ([], False, False),
+ (['etcd'], False, False),
+ (['lb'], False, False),
+ (['nfs'], False, False),
+])
+def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert PackageVersion.is_active(task_vars=task_vars) == is_active
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index d73f339f7..e7e62e5e4 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -24,8 +24,9 @@ openshift_hosted_routers:
ports:
- 80:80
- 443:443
- certificates: "{{ openshift_hosted_router_certificate | default({}) }}"
+ certificate: "{{ openshift_hosted_router_certificate | default({}) }}"
-openshift_hosted_router_certificates: {}
+openshift_hosted_router_certificate: {}
openshift_hosted_registry_cert_expire_days: 730
+openshift_hosted_router_create_certificate: False
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 0b8042473..6e691c26f 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -109,7 +109,7 @@
type: persistentVolumeClaim
claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
when:
- - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack']
+ - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs']
- name: Create OpenShift registry
oc_adm_registry:
@@ -123,3 +123,7 @@
volume_mounts: "{{ openshift_hosted_registry_volumes }}"
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
+
+- include: storage/glusterfs.yml
+ when:
+ - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs'
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
new file mode 100644
index 000000000..b18b24266
--- /dev/null
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -0,0 +1,51 @@
+---
+- name: Wait for registry pods
+ oc_obj:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ state: list
+ kind: pod
+ selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}"
+ register: registry_pods
+ until:
+ - "registry_pods.results.results[0]['items'] | count > 0"
+ # There must be as many matching pods with 'Ready' status True as there are expected replicas
+ - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int"
+ delay: 10
+ retries: "{{ (600 / 10) | int }}"
+
+- name: Determine registry fsGroup
+ set_fact:
+ openshift_hosted_registry_fsgroup: "{{ registry_pods.results.results[0]['items'][0].spec.securityContext.fsGroup }}"
+
+- name: Create temp mount directory
+ command: mktemp -d /tmp/openshift-glusterfs-registry-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- name: Mount registry volume
+ mount:
+ state: mounted
+ fstype: glusterfs
+ src: "{{ groups.oo_glusterfs_to_config[0] }}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ name: "{{ mktemp.stdout }}"
+
+- name: Set registry volume permissions
+ file:
+ dest: "{{ mktemp.stdout }}"
+ state: directory
+ group: "{{ openshift_hosted_registry_fsgroup }}"
+ mode: "2775"
+ recurse: True
+
+- name: Unmount registry volume
+ mount:
+ state: unmounted
+ name: "{{ mktemp.stdout }}"
+
+- name: Delete temp mount directory
+ file:
+ dest: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index 0861b9ec2..e75e3b16f 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -14,13 +14,39 @@
openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
+# This block runs when a cluster-signed certificate is desired.
+# The certificate is generated and placed in master_config_dir/
+- block:
+ - name: generate a default wildcard router certificate
+ oc_adm_ca_server_cert:
+ signer_cert: "{{ openshift_master_config_dir }}/ca.crt"
+ signer_key: "{{ openshift_master_config_dir }}/ca.key"
+ signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
+ hostnames:
+ - "{{ openshift_master_default_subdomain }}"
+ - "*.{{ openshift_master_default_subdomain }}"
+ cert: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}"
+ key: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.key') }}"
+ with_items: "{{ openshift_hosted_routers }}"
+
+ - name: set the openshift_hosted_router_certificate
+ set_fact:
+ openshift_hosted_router_certificate:
+ certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}"
+ keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}"
+ cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}"
+
+ # End Block
+ when: openshift_hosted_router_create_certificate
+
- name: Get the certificate contents for router
copy:
backup: True
dest: "/etc/origin/master/{{ item | basename }}"
src: "{{ item }}"
- with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificates') |
+ with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
+ when: not openshift_hosted_router_create_certificate
- name: Create the router service account(s)
oc_serviceaccount:
@@ -56,9 +82,9 @@
service_account: "{{ item.serviceaccount | default('router') }}"
selector: "{{ item.selector | default(none) }}"
images: "{{ item.images | default(omit) }}"
- cert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else omit }}"
- key_file: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else omit }}"
- cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.cafile | basename)) if 'cafile' in item.certificates else omit }}"
+ cert_file: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else omit }}"
+ key_file: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else omit }}"
+ cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificate.cafile | basename)) if 'cafile' in item.certificate else omit }}"
edits: "{{ openshift_hosted_router_edits | union(item.edits) }}"
ports: "{{ item.ports }}"
stats_port: "{{ item.stats_port }}"
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
index c67058696..5abb2ef83 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
@@ -223,7 +223,7 @@ items:
-
description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"'
name: IMAGE_VERSION
- value: "3.4.0"
+ value: "v3.4"
-
description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
name: IMAGE_PULL_SECRET
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
index 6ead122c5..1d319eab8 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
@@ -105,7 +105,7 @@ parameters:
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
name: IMAGE_VERSION
- value: "3.4.0"
+ value: "v3.4"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
@@ -118,7 +118,7 @@ parameters:
description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
name: MODE
value: "deploy"
--
+-
description: "Set to true to continue even if the deployer runs into an error."
name: CONTINUE_ON_ERROR
value: "false"
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
deleted file mode 100644
index fdfc285ca..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
+++ /dev/null
@@ -1,345 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- kind: ClusterRole
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- kind: ClusterRole
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- kind: ClusterRole
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
- -
- description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"'
- name: IMAGE_VERSION
- value: "3.4.0"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
deleted file mode 100644
index c4ab794ae..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "v3.5"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
deleted file mode 100644
index 5b5503500..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
+++ /dev/null
@@ -1,342 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
- -
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "latest"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml
deleted file mode 100644
index d191c0439..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "openshift/origin-"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "latest"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml
deleted file mode 100644
index fdfc285ca..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml
+++ /dev/null
@@ -1,345 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- kind: ClusterRole
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- kind: ClusterRole
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- kind: ClusterRole
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
- -
- description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"'
- name: IMAGE_VERSION
- value: "3.4.0"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml
deleted file mode 100644
index c4ab794ae..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "v3.5"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml
deleted file mode 100644
index 5b5503500..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml
+++ /dev/null
@@ -1,342 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
- -
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "latest"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml
deleted file mode 100644
index d191c0439..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "openshift/origin-"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "latest"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index 28feac4e6..28feac4e6 100644
--- a/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
index 80cc4233b..80cc4233b 100644
--- a/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 96ed44011..5ee8d1e2a 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -119,6 +119,15 @@ openshift_logging_es_ops_number_of_replicas: 0
# storage related defaults
openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
+# mux - secure_forward listener service
+openshift_logging_mux_allow_external: False
+openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
+# this tells the fluentd node agent to use mux instead of sending directly to Elasticsearch
+openshift_logging_use_mux_client: False
+openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_port: 24284
+openshift_logging_mux_cpu_limit: 100m
+openshift_logging_mux_memory_limit: 512Mi
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#es_logging_contents:
@@ -127,3 +136,5 @@ openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_acc
#fluentd_config_contents:
#fluentd_throttle_contents:
#fluentd_secureforward_contents:
+#fluentd_mux_config_contents:
+#fluentd_mux_secureforward_contents:
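
The mux defaults added above are the switches an installer sets to enable the secure_forward aggregator; a minimal group_vars sketch, assuming an illustrative hostname (only the variable names and the port come from the defaults above):

openshift_logging_use_mux: True
openshift_logging_mux_allow_external: True    # also publish the mux service on an external IP
openshift_logging_use_mux_client: True        # node fluentd forwards to mux instead of Elasticsearch
openshift_logging_mux_hostname: "mux.apps.example.com"   # assumption: example subdomain
openshift_logging_mux_port: 24284
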
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 188ea246c..2f5b68b4d 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -44,6 +44,7 @@
- logging-kibana
- logging-kibana-proxy
- logging-curator
+ - logging-mux
ignore_errors: yes
register: delete_result
changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
@@ -109,5 +110,6 @@
- logging-curator
- logging-elasticsearch
- logging-fluentd
+ - logging-mux
register: delete_result
changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 740e490e1..b34df018d 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -45,6 +45,21 @@
- procure_component: kibana-internal
hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: mux
+ hostnames: "logging-mux, {{openshift_logging_mux_hostname}}"
+ when: openshift_logging_use_mux
+
+- include: procure_shared_key.yaml
+ loop_control:
+ loop_var: shared_key_info
+ with_items:
+ - procure_component: mux
+ when: openshift_logging_use_mux
+
- name: Copy proxy TLS configuration file
copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json
when: server_tls_json is undefined
@@ -85,6 +100,14 @@
loop_control:
loop_var: node_name
+- name: Generate PEM cert for mux
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.mux
+ loop_control:
+ loop_var: node_name
+ when: openshift_logging_use_mux
+
- name: Creating necessary JKS certs
include: generate_jks.yaml
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
index 253543f54..44bd0058a 100644
--- a/roles/openshift_logging/tasks/generate_configmaps.yaml
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -134,3 +134,43 @@
when: fluentd_configmap.stdout is defined
changed_when: no
check_mode: no
+
+- block:
+ - copy:
+ src: fluent.conf
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ src: secure-forward.conf
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_mux_config_contents}}"
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_mux_secureforward_contents}}"
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-mux
+ --from-file=fluent.conf={{mktemp.stdout}}/fluent-mux.conf
+ --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward-mux.conf -o yaml --dry-run
+ register: mux_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{mux_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-mux-configmap.yaml"
+ when: mux_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
+ when: openshift_logging_use_mux
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
index f396bcc6d..c1da49fd8 100644
--- a/roles/openshift_logging/tasks/generate_secrets.yaml
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -34,6 +34,36 @@
check_mode: no
changed_when: no
+- name: Retrieving the cert to use when generating secrets for mux
+ slurp: src="{{generated_certs_dir}}/{{item.file}}"
+ register: mux_key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "mux_key", file: "system.logging.mux.key"}
+ - { name: "mux_cert", file: "system.logging.mux.crt"}
+ - { name: "mux_shared_key", file: "mux_shared_key"}
+ when: openshift_logging_use_mux
+
+- name: Generating secrets for mux
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: "logging-{{component}}"
+ secret_key_file: "{{component}}_key"
+ secret_cert_file: "{{component}}_cert"
+ secrets:
+ - {key: ca, value: "{{mux_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
+ - {key: key, value: "{{mux_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
+ - {key: cert, value: "{{mux_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
+ - {key: shared_key, value: "{{mux_key_pairs | entry_from_named_pair('mux_shared_key')| b64decode }}"}
+ secret_keys: ["ca", "cert", "key", "shared_key"]
+ with_items:
+ - mux
+ loop_control:
+ loop_var: component
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_use_mux
+
- name: Generating secrets for kibana proxy
template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
vars:
@@ -43,7 +73,7 @@
- {key: session-secret, value: "{{session_secret}}"}
- {key: server-key, value: "{{kibana_key_file}}"}
- {key: server-cert, value: "{{kibana_cert_file}}"}
- - {key: server-tls, value: "{{server_tls_file}}"}
+ - {key: server-tls.json, value: "{{server_tls_file}}"}
secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
index 5091c1209..e3a5c5eb3 100644
--- a/roles/openshift_logging/tasks/generate_services.yaml
+++ b/roles/openshift_logging/tasks/generate_services.yaml
@@ -85,3 +85,35 @@
when: openshift_logging_use_ops | bool
check_mode: no
changed_when: no
+
+- name: Generating logging-mux service for external connections
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
+ vars:
+ obj_name: logging-mux
+ ports:
+ - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: mux
+ externalIPs:
+ - "{{ ansible_eth0.ipv4.address }}"
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_mux_allow_external
+
+- name: Generating logging-mux service for intra-cluster connections
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
+ vars:
+ obj_name: logging-mux
+ ports:
+ - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: mux
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_use_mux and not openshift_logging_mux_allow_external
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index 28fad420b..b80f37892 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -5,60 +5,47 @@
- set_fact: openshift_logging_es_pvc_prefix="logging-es"
when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''"
-- set_fact: es_pvc_pool={{[]}}
-
-- set_fact: openshift_logging_es_pvc_prefix="{{ openshift_logging_es_pvc_prefix | default('logging-es') }}"
-
-- name: Generate PersistentVolumeClaims
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+### evaluate if the PVC attached to the dc currently matches the provided vars
+## if it does then we reuse that pvc in the DC
+- include: set_es_storage.yaml
vars:
- es_pv_selector: "{{openshift_logging_es_pv_selector}}"
- es_pvc_dynamic: "{{openshift_logging_es_pvc_dynamic | bool}}"
- es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
- es_pvc_prefix: "{{openshift_logging_es_pvc_prefix}}"
- es_pvc_size: "{{openshift_logging_es_pvc_size}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
- es_cluster_size: "{{openshift_logging_es_cluster_size}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
-
-# we should initialize the es_dc_pool with the current keys
-- name: Init pool of DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{ es_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ es_component: es
+ es_name: "{{ deployment.0 }}"
+ es_spec: "{{ deployment.1 }}"
+ es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
loop_control:
- loop_var: deploy_name
+ loop_var: deployment
+## if it does not then we should create one that does and attach it
-# This should be used to generate new DC names if necessary
-- name: Create new DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{es_dc_pool|default([]) + [deploy_name]}}
+## create new dc/pvc if needed
+- include: set_es_storage.yaml
vars:
- component: es
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_current_es_size | int }}
- check_mode: no
-
-- name: Generate Elasticsearch DeploymentConfig
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
- vars:
- component: es
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_memory_limit}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}"
+ es_component: es
+ es_name: "logging-es-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ es_spec: "{}"
+ es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_dc_pool }}"
- check_mode: no
- changed_when: no
+ with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }}
# --------- Tasks for Operation clusters ---------
@@ -73,74 +60,53 @@
es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
when:
- - openshift_logging_use_ops | bool
- - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
+ - openshift_logging_use_ops | bool
+ - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
check_mode: no
- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''"
-- set_fact: es_pvc_pool={{[]}}
-
-- name: Generate PersistentVolumeClaims for Ops
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+- include: set_es_storage.yaml
vars:
- es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
- es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
- es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
- es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic | bool}}"
- es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
- when:
- - openshift_logging_use_ops | bool
- check_mode: no
-
-- name: Init pool of DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{ es_ops_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
+ es_component: es-ops
+ es_name: "{{ deployment.0 }}"
+ es_spec: "{{ deployment.1 }}"
+ es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
+ es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
loop_control:
- loop_var: deploy_name
- when:
- - openshift_logging_use_ops | bool
-
-- name: Create new DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{es_ops_dc_pool | default([]) + [deploy_name]}}
- vars:
- component: es-ops
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }}
+ loop_var: deployment
when:
- - openshift_logging_use_ops | bool
- check_mode: no
+ - openshift_logging_use_ops | bool
+## if it does not then we should create one that does and attach it
-- name: Generate Elasticsearch DeploymentConfig for Ops
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+## create new dc/pvc if needed
+- include: set_es_storage.yaml
vars:
- component: es-ops
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}"
- es_node_quorum: "{{es_ops_node_quorum}}"
- es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
- es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
- openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
- es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}"
+ es_component: es-ops
+ es_name: "logging-es-ops-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ es_spec: "{}"
+ es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_ops_dc_pool | default([]) }}"
+ with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }}
when:
- - openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
+ - openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 83b68fa77..aec455c22 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -27,6 +27,10 @@
loop_control:
loop_var: install_component
+- name: Install logging mux
+ include: "{{ role_path }}/tasks/install_mux.yaml"
+ when: openshift_logging_use_mux
+
- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
register: object_def_files
changed_when: no
diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml
new file mode 100644
index 000000000..296da626f
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_mux.yaml
@@ -0,0 +1,67 @@
+---
+- set_fact: mux_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
+ check_mode: no
+
+- set_fact: mux_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
+ check_mode: no
+
+- name: Check mux current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-mux
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: mux_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Generating mux deploymentconfig
+ template: src=mux.j2 dest={{mktemp.stdout}}/templates/logging-mux-dc.yaml
+ vars:
+ component: mux
+ logging_component: mux
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-fluentd:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ ops_host: "{{ mux_ops_host }}"
+ ops_port: "{{ mux_ops_port }}"
+ mux_cpu_limit: "{{openshift_logging_mux_cpu_limit}}"
+ mux_memory_limit: "{{openshift_logging_mux_memory_limit}}"
+ replicas: "{{mux_replica_count.stdout | default (0)}}"
+ mux_node_selector: "{{openshift_logging_mux_nodeselector | default({})}}"
+ check_mode: no
+ changed_when: no
+
+- name: "Check mux hostmount-anyuid permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get scc/hostmount-anyuid -o jsonpath='{.users}'
+ register: mux_hostmount_anyuid
+ check_mode: no
+ changed_when: no
+
+- name: "Set hostmount-anyuid permissions for mux"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: mux_output
+ failed_when: "mux_output.rc == 1 and 'exists' not in mux_output.stderr"
+ check_mode: no
+ when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
+
+- name: "Check mux cluster-reader permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
+ register: mux_cluster_reader
+ check_mode: no
+ changed_when: no
+
+- name: "Set cluster-reader permissions for mux"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: mux2_output
+ failed_when: "mux2_output.rc == 1 and 'exists' not in mux2_output.stderr"
+ check_mode: no
+ when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml
index cb9509de1..c4db7d033 100644
--- a/roles/openshift_logging/tasks/oc_apply.yaml
+++ b/roles/openshift_logging/tasks/oc_apply.yaml
@@ -1,52 +1,52 @@
---
-- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }}
- --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_init
- failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
- changed_when: no
+- oc_obj:
+ kind: "{{ file_content.kind }}"
+ name: "{{ file_content.metadata.name }}"
+ state: present
+ namespace: "{{ namespace }}"
+ files:
+ - "{{ file_name }}"
+ when: file_content.kind != "Service"
-- name: Applying {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: no
+## still need to do this for services until the template logic is replaced by oc_*
+- block:
+ - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}}
+ register: generation_init
+ failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
+ changed_when: no
-- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_changed
- failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''"
- changed_when: generation_changed.stdout | default (0) | int > generation_init.stdout | default(0) | int
- when:
- - "'field is immutable' not in generation_apply.stderr"
+ - name: Applying {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: no
-- name: Removing previous {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- delete -f {{ file_name }}
- -n {{ namespace }}
- register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
- changed_when: generation_delete.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ - name: Removing previous {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ delete -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_delete
+ failed_when: "'error' in generation_delete.stderr"
+ changed_when: generation_delete.rc == 0
+ when: "'field is immutable' in generation_apply.stderr"
-- name: Recreating {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: generation_apply.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ - name: Recreating {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: generation_apply.rc == 0
+ when: "'field is immutable' in generation_apply.stderr"
+ when: file_content.kind == "Service"
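
The rewritten oc_apply.yaml above routes every object except Services through the lib_openshift oc_obj module instead of shelling out to oc apply; a hedged sketch of an equivalent standalone task, with the namespace and file path chosen for illustration (they are not taken from this patch):

- oc_obj:
    kind: ConfigMap
    name: logging-mux
    state: present
    namespace: logging
    files:
    - /tmp/openshift-logging-ansible-XXXXXX/templates/logging-mux-configmap.yaml
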
diff --git a/roles/openshift_logging/tasks/procure_shared_key.yaml b/roles/openshift_logging/tasks/procure_shared_key.yaml
new file mode 100644
index 000000000..056ff6b98
--- /dev/null
+++ b/roles/openshift_logging/tasks/procure_shared_key.yaml
@@ -0,0 +1,25 @@
+---
+- name: Checking for {{ shared_key_info.procure_component }}_shared_key
+ stat: path="{{generated_certs_dir}}/{{ shared_key_info.procure_component }}_shared_key"
+ register: component_shared_key_file
+ check_mode: no
+
+- name: Trying to discover shared key variable name for {{ shared_key_info.procure_component }}
+ set_fact: procure_component_shared_key={{ lookup('env', '{{shared_key_info.procure_component}}' + '_shared_key') }}
+ when:
+ - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined
+ check_mode: no
+
+- name: Creating shared_key for {{ shared_key_info.procure_component }}
+ copy: content="{{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}"
+ dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key"
+ check_mode: no
+ when:
+ - not component_shared_key_file.stat.exists
+
+- name: Copying shared key for {{ shared_key_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_shared_key}}" dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key"
+ check_mode: no
+ when:
+ - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined
+ - not component_shared_key_file.stat.exists
diff --git a/roles/openshift_logging/tasks/set_es_storage.yaml b/roles/openshift_logging/tasks/set_es_storage.yaml
new file mode 100644
index 000000000..198b1d04d
--- /dev/null
+++ b/roles/openshift_logging/tasks/set_es_storage.yaml
@@ -0,0 +1,82 @@
+---
+- set_fact: es_storage_type="{{ es_spec.volumes['elasticsearch-storage'] }}"
+ when: es_spec.volumes is defined
+
+- set_fact: es_storage_claim="{{ es_spec.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName }}"
+ when:
+ - es_spec.volumes is defined
+ - es_storage_type.persistentVolumeClaim is defined
+
+- set_fact: es_storage_claim=""
+ when:
+ - not es_spec.volumes is defined or not es_storage_type.persistentVolumeClaim is defined
+
+## take an ES dc and evaluate its storage option
+# if it is a hostmount or emptydir we don't do anything with it
+# if it's a pvc we see if the corresponding pvc matches the provided specs (if they exist)
+- oc_obj:
+ state: list
+ kind: pvc
+ name: "{{ es_storage_claim }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ register: pvc_spec
+ failed_when: pvc_spec.results.stderr is defined
+ when:
+ - es_spec.volumes is defined
+ - es_storage_type.persistentVolumeClaim is defined
+
+- set_fact: pvc_size="{{ pvc_spec.results.results[0].spec.resources.requests.storage }}"
+ when:
+ - pvc_spec.results is defined
+ - pvc_spec.results.results[0].spec is defined
+
+# if not, create the pvc and use it
+- block:
+
+ - name: Generating PersistentVolumeClaims
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+ size: "{{ es_pvc_size }}"
+ access_modes: "{{ openshift_logging_storage_access_modes }}"
+ pv_selector: "{{ es_pv_selector }}"
+ when: not es_pvc_dynamic | bool
+ check_mode: no
+ changed_when: no
+
+ - name: Generating PersistentVolumeClaims - Dynamic
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ size: "{{ es_pvc_size }}"
+ access_modes: "{{ openshift_logging_storage_access_modes }}"
+ pv_selector: "{{ es_pv_selector }}"
+ when: es_pvc_dynamic | bool
+ check_mode: no
+ changed_when: no
+
+ - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+
+ when:
+ - es_pvc_size | search('^\d.*')
+ - not es_spec.volumes is defined or not es_storage_claim | search( es_pvc_prefix ) or ( not pvc_size | search( es_pvc_size ) and not es_pvc_size | search( pvc_size ) )
+
+- name: Generate Elasticsearch DeploymentConfig
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: "{{ es_component }}"
+ deploy_name: "{{ es_name }}"
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{ es_component }}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{ es_cpu_limit }}"
+ es_memory_limit: "{{ es_memory_limit }}"
+ es_node_selector: "{{ es_node_selector }}"
+ es_storage: "{{ openshift_logging_facts | es_storage( es_name, es_storage_claim ) }}"
+ es_number_of_shards: "{{ es_number_of_shards }}"
+ es_number_of_replicas: "{{ es_number_of_replicas }}"
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index edbb62c3e..1042b3daa 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -21,6 +21,26 @@
loop_control:
loop_var: fluentd_host
+- name: Retrieve mux
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=mux"
+ namespace: "{{openshift_logging_namespace}}"
+ register: mux_dc
+ when: openshift_logging_use_mux
+
+- name: start mux
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_logging_namespace}}"
+ replicas: "{{ openshift_logging_mux_replica_count | default (1) }}"
+ with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_mux
+
- name: Retrieve elasticsearch
oc_obj:
state: list
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index 4b3722e29..d20c57cc1 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -21,6 +21,26 @@
loop_control:
loop_var: fluentd_host
+- name: Retrieve mux
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=mux"
+ namespace: "{{openshift_logging_namespace}}"
+ register: mux_dc
+ when: openshift_logging_use_mux
+
+- name: stop mux
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_logging_namespace}}"
+ replicas: 0
+ with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_mux
+
- name: Retrieve elasticsearch
oc_obj:
state: list
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
index a0fefd882..c6284166b 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging/templates/curator.j2
@@ -89,9 +89,6 @@ spec:
- name: config
mountPath: /etc/curator/settings
readOnly: true
- - name: elasticsearch-storage
- mountPath: /elasticsearch/persistent
- readOnly: true
volumes:
- name: certs
secret:
@@ -99,5 +96,3 @@ spec:
- name: config
configMap:
name: logging-curator
- - name: elasticsearch-storage
- emptyDir: {}
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
index 16185fc1d..f89855bf5 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging/templates/es.j2
@@ -95,6 +95,13 @@ spec:
readOnly: true
- name: elasticsearch-storage
mountPath: /elasticsearch/persistent
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/elasticsearch/probe/readiness.sh"
+ initialDelaySeconds: 5
+ timeoutSeconds: 4
+ periodSeconds: 5
volumes:
- name: elasticsearch
secret:
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
index 0bf1686ad..d13691259 100644
--- a/roles/openshift_logging/templates/fluentd.j2
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -59,6 +59,11 @@ spec:
- name: dockercfg
mountPath: /etc/sysconfig/docker
readOnly: true
+{% if openshift_logging_use_mux_client | bool %}
+ - name: muxcerts
+ mountPath: /etc/fluent/muxkeys
+ readOnly: true
+{% endif %}
env:
- name: "K8S_HOST_URL"
value: "{{openshift_logging_master_url}}"
@@ -122,6 +127,8 @@ spec:
value: "{{openshift_logging_fluentd_journal_source | default('')}}"
- name: "JOURNAL_READ_FROM_HEAD"
value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ - name: "USE_MUX_CLIENT"
+ value: "{{openshift_logging_use_mux_client| default('false')}}"
volumes:
- name: runlogjournal
hostPath:
@@ -147,3 +154,8 @@ spec:
- name: dockercfg
hostPath:
path: /etc/sysconfig/docker
+{% if openshift_logging_use_mux_client | bool %}
+ - name: muxcerts
+ secret:
+ secretName: logging-mux
+{% endif %}
diff --git a/roles/openshift_logging/templates/mux.j2 b/roles/openshift_logging/templates/mux.j2
new file mode 100644
index 000000000..41e6abd52
--- /dev/null
+++ b/roles/openshift_logging/templates/mux.j2
@@ -0,0 +1,121 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{replicas|default(0)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ spec:
+ serviceAccountName: aggregated-logging-fluentd
+{% if mux_node_selector is iterable and mux_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in mux_node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - name: "mux"
+ image: {{image}}
+ imagePullPolicy: Always
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
+ resources:
+ limits:
+{% if mux_cpu_limit is not none %}
+ cpu: "{{mux_cpu_limit}}"
+{% endif %}
+{% if mux_memory_limit is not none %}
+ memory: "{{mux_memory_limit}}"
+{% endif %}
+{% endif %}
+ ports:
+ - containerPort: "{{ openshift_logging_mux_port }}"
+ name: mux-forward
+ volumeMounts:
+ - name: config
+ mountPath: /etc/fluent/configs.d/user
+ readOnly: true
+ - name: certs
+ mountPath: /etc/fluent/keys
+ readOnly: true
+ - name: dockerhostname
+ mountPath: /etc/docker-hostname
+ readOnly: true
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: muxcerts
+ mountPath: /etc/fluent/muxkeys
+ readOnly: true
+ env:
+ - name: "K8S_HOST_URL"
+ value: "{{openshift_logging_master_url}}"
+ - name: "ES_HOST"
+ value: "{{openshift_logging_es_host}}"
+ - name: "ES_PORT"
+ value: "{{openshift_logging_es_port}}"
+ - name: "ES_CLIENT_CERT"
+ value: "{{openshift_logging_es_client_cert}}"
+ - name: "ES_CLIENT_KEY"
+ value: "{{openshift_logging_es_client_key}}"
+ - name: "ES_CA"
+ value: "{{openshift_logging_es_ca}}"
+ - name: "OPS_HOST"
+ value: "{{ops_host}}"
+ - name: "OPS_PORT"
+ value: "{{ops_port}}"
+ - name: "OPS_CLIENT_CERT"
+ value: "{{openshift_logging_es_ops_client_cert}}"
+ - name: "OPS_CLIENT_KEY"
+ value: "{{openshift_logging_es_ops_client_key}}"
+ - name: "OPS_CA"
+ value: "{{openshift_logging_es_ops_ca}}"
+ - name: "USE_JOURNAL"
+ value: "false"
+ - name: "JOURNAL_SOURCE"
+ value: "{{openshift_logging_fluentd_journal_source | default('')}}"
+ - name: "JOURNAL_READ_FROM_HEAD"
+ value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ - name: FORWARD_LISTEN_HOST
+ value: "{{ openshift_logging_mux_hostname }}"
+ - name: FORWARD_LISTEN_PORT
+ value: "{{ openshift_logging_mux_port }}"
+ - name: USE_MUX
+ value: "true"
+ - name: MUX_ALLOW_EXTERNAL
+ value: "{{ openshift_logging_mux_allow_external| default('false') }}"
+ volumes:
+ - name: config
+ configMap:
+ name: logging-mux
+ - name: certs
+ secret:
+ secretName: logging-fluentd
+ - name: dockerhostname
+ hostPath:
+ path: /etc/hostname
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: muxcerts
+ secret:
+ secretName: logging-mux
diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2
index 6c4ec0c76..70644a39c 100644
--- a/roles/openshift_logging/templates/service.j2
+++ b/roles/openshift_logging/templates/service.j2
@@ -26,3 +26,9 @@ spec:
{% for key, value in selector.iteritems() %}
{{key}}: {{value}}
{% endfor %}
+{% if externalIPs is defined -%}
+ externalIPs:
+{% for ip in externalIPs %}
+ - {{ ip }}
+{% endfor %}
+{% endif %}
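
When externalIPs is passed in (as the external logging-mux service task does), the block added above appends an externalIPs stanza to the rendered Service spec; roughly, under the port and selector values used earlier (the IP address itself is illustrative):

  ports:
  - port: 24284
    targetPort: mux-forward
    name: mux-forward
  selector:
    provider: openshift
    component: mux
  externalIPs:
  - 192.0.2.10
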
diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml
index 9679d209a..92e68a0a3 100644
--- a/roles/openshift_logging/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default(openshift_release | default ('3.5.0') ) }}"
+__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('3.6.0') }}"
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index f202486a5..cfc4e2722 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -3,24 +3,13 @@
msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1."
when: not openshift.common.version_gte_3_1_or_1_1 | bool
-- name: Copy Configuration to temporary conf
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}}
- changed_when: false
-
- name: Add Management Infrastructure project
- command: >
- {{ openshift.common.client_binary }} adm new-project
- management-infra
- --description="Management Infrastructure"
- --config={{manage_iq_tmp_conf}}
- register: osmiq_create_mi_project
- failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0"
- changed_when: osmiq_create_mi_project.rc == 0
+ oc_project:
+ name: management-infra
+ description: Management Infrastructure
- name: Create Admin and Image Inspector Service Account
oc_serviceaccount:
- kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
name: "{{ item }}"
namespace: management-infra
state: present
@@ -28,51 +17,42 @@
- management-admin
- inspector-admin
-- name: Create Cluster Role
- shell: >
- echo {{ manageiq_cluster_role | to_json | quote }} |
- {{ openshift.common.client_binary }} create
- --config={{manage_iq_tmp_conf}}
- -f -
- register: osmiq_create_cluster_role
- failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0"
- changed_when: osmiq_create_cluster_role.rc == 0
+- name: Create manageiq cluster role
+ oc_clusterrole:
+ name: management-infra-admin
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods/proxy
+ verbs:
+ - "*"
- name: Create Hawkular Metrics Admin Cluster Role
- shell: >
- echo {{ manageiq_metrics_admin_clusterrole | to_json | quote }} |
- {{ openshift.common.client_binary }}
- --config={{manage_iq_tmp_conf}}
- create -f -
- register: oshawkular_create_cluster_role
- failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0"
- changed_when: oshawkular_create_cluster_role.rc == 0
- # AUDIT:changed_when_note: Checking the return code is insufficient
- # here. We really need to verify the if the role even exists before
- # we run this task.
+ oc_clusterrole:
+ name: hawkular-metrics-admin
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - hawkular-alerts
+ - hawkular-metrics
+ verbs:
+ - "*"
- name: Configure role/user permissions
- command: >
- {{ openshift.common.client_binary }} adm {{item}}
- --config={{manage_iq_tmp_conf}}
- with_items: "{{manage_iq_tasks}}"
- register: osmiq_perm_task
- failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0"
- changed_when: osmiq_perm_task.rc == 0
- # AUDIT:changed_when_note: Checking the return code is insufficient
- # here. We really need to compare the current role/user permissions
- # with their expected state. I think we may have a module for this?
-
+ oc_adm_policy_user:
+ namespace: management-infra
+ resource_name: "{{ item.resource_name }}"
+ resource_kind: "{{ item.resource_kind }}"
+ user: "{{ item.user }}"
+ with_items: "{{ manage_iq_tasks }}"
- name: Configure 3_2 role/user permissions
- command: >
- {{ openshift.common.client_binary }} adm {{item}}
- --config={{manage_iq_tmp_conf}}
+ oc_adm_policy_user:
+ namespace: management-infra
+ resource_name: "{{ item.resource_name }}"
+ resource_kind: "{{ item.resource_kind }}"
+ user: "{{ item.user }}"
with_items: "{{manage_iq_openshift_3_2_tasks}}"
- register: osmiq_perm_3_2_task
- failed_when: osmiq_perm_3_2_task.rc != 0
- changed_when: osmiq_perm_3_2_task.rc == 0
when: openshift.common.version_gte_3_2_or_1_2 | bool
-
-- name: Clean temporary configuration file
- file: path={{manage_iq_tmp_conf}} state=absent
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 9936bb126..15d667628 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -1,41 +1,31 @@
---
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-manageiq_cluster_role:
- apiVersion: v1
- kind: ClusterRole
- metadata:
- name: management-infra-admin
- rules:
- - resources:
- - pods/proxy
- verbs:
- - '*'
-
-manageiq_metrics_admin_clusterrole:
- apiVersion: v1
- kind: ClusterRole
- metadata:
- name: hawkular-metrics-admin
- rules:
- - apiGroups:
- - ""
- resources:
- - hawkular-metrics
- - hawkular-alerts
- verbs:
- - '*'
-
-manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
-
manage_iq_tasks:
-- policy add-role-to-user -n management-infra admin -z management-admin
-- policy add-role-to-user -n management-infra management-infra-admin -z management-admin
-- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
-- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
-- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
-- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
-- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
-- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
+- resource_kind: role
+ resource_name: admin
+ user: management-admin
+- resource_kind: role
+ resource_name: management-infra-admin
+ user: management-admin
+- resource_kind: cluster-role
+ resource_name: cluster-reader
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: scc
+ resource_name: privileged
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: system:image-puller
+ user: system:serviceaccount:management-infra:inspector-admin
+- resource_kind: scc
+ resource_name: privileged
+ user: system:serviceaccount:management-infra:inspector-admin
+- resource_kind: cluster-role
+ resource_name: self-provisioner
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: hawkular-metrics-admin
+ user: system:serviceaccount:management-infra:management-admin
manage_iq_openshift_3_2_tasks:
-- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: system:image-auditor
+ user: system:serviceaccount:management-infra:management-admin
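
Each entry in manage_iq_tasks above is consumed directly by the oc_adm_policy_user task in tasks/main.yaml, so the first list item expands to roughly the following invocation:

- oc_adm_policy_user:
    namespace: management-infra
    resource_kind: role
    resource_name: admin
    user: management-admin
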
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index eef0f414e..155abd970 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,7 +12,7 @@ Requires=docker.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2
index be7644710..13381cd1a 100644
--- a/roles/openshift_master/templates/master_docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2
@@ -8,7 +8,7 @@ Wants=etcd_container.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
Restart=always
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index 7f7bc4316..b50d6d9db 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -40,7 +40,7 @@ class LookupModule(LookupBase):
# pylint: disable=line-too-long
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
@@ -49,7 +49,7 @@ class LookupModule(LookupBase):
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
if deployment_type == 'origin':
- # convert short_version to enterpise short_version
+ # convert short_version to enterprise short_version
short_version = re.sub('^1.', '3.', short_version)
if short_version == 'latest':
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
index 66e6ecea3..a66cb3c88 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -41,7 +41,7 @@ class LookupModule(LookupBase):
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index 1fab84c71..4a28fb8f8 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -55,6 +55,8 @@ DEFAULT_PREDICATES_1_5 = [
{'name': 'CheckNodeDiskPressure'},
]
+DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5
+
REGION_PREDICATE = {
'name': 'Region',
'argument': {
@@ -75,9 +77,8 @@ TEST_VARS = [
('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
('1.5', 'origin', DEFAULT_PREDICATES_1_5),
('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
- ('1.6', 'origin', DEFAULT_PREDICATES_1_5),
- ('3.6', 'origin', DEFAULT_PREDICATES_1_5),
- ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
+ ('3.6', 'origin', DEFAULT_PREDICATES_3_6),
+ ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6),
]
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
index 1098f9391..97ef2387e 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
@@ -42,6 +42,8 @@ DEFAULT_PRIORITIES_1_5 = [
{'name': 'TaintTolerationPriority', 'weight': 1}
]
+DEFAULT_PRIORITIES_3_6 = DEFAULT_PRIORITIES_1_5
+
ZONE_PRIORITY = {
'name': 'Zone',
'argument': {
@@ -63,9 +65,8 @@ TEST_VARS = [
('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4),
('1.5', 'origin', DEFAULT_PRIORITIES_1_5),
('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
- ('1.6', 'origin', DEFAULT_PRIORITIES_1_5),
- ('3.6', 'origin', DEFAULT_PRIORITIES_1_5),
- ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
+ ('3.6', 'origin', DEFAULT_PRIORITIES_3_6),
+ ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_3_6),
]
diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh
deleted file mode 100755
index f977b6dd6..000000000
--- a/roles/openshift_metrics/files/import_jks_certs.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-set -ex
-
-function import_certs() {
- dir=$CERT_DIR
- hawkular_metrics_keystore_password=$(echo $METRICS_KEYSTORE_PASSWD | base64 --decode)
- hawkular_metrics_truststore_password=$(echo $METRICS_TRUSTSTORE_PASSWD | base64 --decode)
- hawkular_alias=`keytool -noprompt -list -keystore $dir/hawkular-metrics.truststore -storepass ${hawkular_metrics_truststore_password} | sed -n '7~2s/,.*$//p'`
-
- if [ ! -f $dir/hawkular-metrics.keystore ]; then
- echo "Creating the Hawkular Metrics keystore from the PEM file"
- keytool -importkeystore -v \
- -srckeystore $dir/hawkular-metrics.pkcs12 \
- -destkeystore $dir/hawkular-metrics.keystore \
- -srcstoretype PKCS12 \
- -deststoretype JKS \
- -srcstorepass $hawkular_metrics_keystore_password \
- -deststorepass $hawkular_metrics_keystore_password
- fi
-
- cert_alias_names=(ca metricca)
-
- for cert_alias in ${cert_alias_names[*]}; do
- if [[ ! ${hawkular_alias[*]} =~ "$cert_alias" ]]; then
- echo "Importing the CA Certificate with alias $cert_alias into the Hawkular Metrics Truststore"
- keytool -noprompt -import -v -trustcacerts -alias $cert_alias \
- -file ${dir}/ca.crt \
- -keystore $dir/hawkular-metrics.truststore \
- -trustcacerts \
- -storepass $hawkular_metrics_truststore_password
- fi
- done
-}
-
-import_certs
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 01fc1ef64..07b7eca33 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -13,21 +13,6 @@
hostnames: hawkular-cassandra
changed_when: no
-- slurp: src={{ mktemp.stdout }}/hawkular-metrics-truststore.pwd
- register: hawkular_truststore_password
-
-- stat: path="{{mktemp.stdout}}/{{item}}"
- register: pwd_file_stat
- with_items:
- - hawkular-metrics.pwd
- - hawkular-metrics.htpasswd
- changed_when: no
-
-- set_fact:
- pwd_files: "{{pwd_files | default({}) | combine ({item.item: item.stat}) }}"
- with_items: "{{pwd_file_stat.results}}"
- changed_when: no
-
- name: generate password for hawkular metrics
local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
with_items:
@@ -47,8 +32,6 @@
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
-- include: import_jks_certs.yaml
-
- name: read files for the hawkular-metrics secret
shell: >
printf '%s: ' '{{ item }}'
@@ -56,13 +39,11 @@
register: hawkular_secrets
with_items:
- ca.crt
- - hawkular-metrics.crt
- - hawkular-metrics.keystore
- - hawkular-metrics-keystore.pwd
- - hawkular-metrics.truststore
- - hawkular-metrics-truststore.pwd
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
+ - hawkular-metrics.crt
+ - hawkular-metrics.key
+ - hawkular-metrics.pem
- hawkular-cassandra.crt
- hawkular-cassandra.key
- hawkular-cassandra.pem
@@ -73,42 +54,23 @@
{{ hawkular_secrets.results|map(attribute='stdout')|join('
')|from_yaml }}
-- name: generate hawkular-metrics-secrets secret template
- template:
- src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_secrets.yaml"
- vars:
- name: hawkular-metrics-secrets
- labels:
- metrics-infra: hawkular-metrics
- data:
- hawkular-metrics.keystore: >
- {{ hawkular_secrets['hawkular-metrics.keystore'] }}
- hawkular-metrics.keystore.password: >
- {{ hawkular_secrets['hawkular-metrics-keystore.pwd'] }}
- hawkular-metrics.truststore: >
- {{ hawkular_secrets['hawkular-metrics.truststore'] }}
- hawkular-metrics.truststore.password: >
- {{ hawkular_secrets['hawkular-metrics-truststore.pwd'] }}
- hawkular-metrics.keystore.alias: "{{ 'hawkular-metrics'|b64encode }}"
- hawkular-metrics.htpasswd.file: >
- {{ hawkular_secrets['hawkular-metrics.htpasswd'] }}
- when: name not in metrics_secrets.stdout_lines
- changed_when: no
-
-- name: generate hawkular-metrics-certificate secret template
+- name: generate hawkular-metrics-certs secret template
template:
src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_certificate.yaml"
+ dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-certs.yaml"
vars:
- name: hawkular-metrics-certificate
+ name: hawkular-metrics-certs
labels:
- metrics-infra: hawkular-metrics
+ metrics-infra: hawkular-metrics-certs
+ annotations:
+ service.alpha.openshift.io/originating-service-name: hawkular-metrics
data:
- hawkular-metrics.certificate: >
+ tls.crt: >
{{ hawkular_secrets['hawkular-metrics.crt'] }}
- hawkular-metrics-ca.certificate: >
- {{ hawkular_secrets['ca.crt'] }}
+ tls.key: >
+ {{ hawkular_secrets['hawkular-metrics.key'] }}
+ tls.truststore.crt: >
+ {{ hawkular_secrets['hawkular-cassandra.crt'] }}
when: name not in metrics_secrets.stdout_lines
changed_when: no
@@ -122,6 +84,7 @@
metrics-infra: hawkular-metrics
data:
hawkular-metrics.username: "{{ 'hawkular'|b64encode }}"
+ hawkular-metrics.htpasswd: "{{ hawkular_secrets['hawkular-metrics.htpasswd'] }}"
hawkular-metrics.password: >
{{ hawkular_secrets['hawkular-metrics.pwd'] }}
when: name not in metrics_secrets.stdout_lines
diff --git a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml b/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml
deleted file mode 100644
index ced2df1d0..000000000
--- a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: generate heapster key/cert
- command: >
- {{ openshift.common.admin_binary }} ca create-server-cert
- --config={{ mktemp.stdout }}/admin.kubeconfig
- --key='{{ mktemp.stdout }}/heapster.key'
- --cert='{{ mktemp.stdout }}/heapster.cert'
- --hostnames=heapster
- --signer-cert='{{ mktemp.stdout }}/ca.crt'
- --signer-key='{{ mktemp.stdout }}/ca.key'
- --signer-serial='{{ mktemp.stdout }}/ca.serial.txt'
-
-- when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines"
- block:
- - name: read files for the heapster secret
- slurp: src={{ item }}
- register: heapster_secret
- with_items:
- - "{{ mktemp.stdout }}/heapster.cert"
- - "{{ mktemp.stdout }}/heapster.key"
- - "{{ client_ca }}"
- vars:
- custom_ca: "{{ mktemp.stdout }}/heapster_client_ca.crt"
- default_ca: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- client_ca: "{{ custom_ca|exists|ternary(custom_ca, default_ca) }}"
- - name: generate heapster secret template
- template:
- src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml"
- force: no
- vars:
- name: heapster-secrets
- labels:
- metrics-infra: heapster
- data:
- heapster.cert: "{{ heapster_secret.results[0].content }}"
- heapster.key: "{{ heapster_secret.results[1].content }}"
- heapster.client-ca: "{{ heapster_secret.results[2].content }}"
- heapster.allowed-users: >
- {{ openshift_metrics_heapster_allowed_users|b64encode }}
diff --git a/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml
new file mode 100644
index 000000000..e81d90ae7
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml
@@ -0,0 +1,14 @@
+---
+- name: generate heapster secret template
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml"
+ force: no
+ vars:
+ name: heapster-secrets
+ labels:
+ metrics-infra: heapster
+ data:
+ heapster.allowed-users: >
+ {{ openshift_metrics_heapster_allowed_users|b64encode }}
+ when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines"
diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml
deleted file mode 100644
index e098145e9..000000000
--- a/roles/openshift_metrics/tasks/import_jks_certs.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- stat: path="{{mktemp.stdout}}/hawkular-metrics.keystore"
- register: metrics_keystore
- check_mode: no
-
-- stat: path="{{mktemp.stdout}}/hawkular-metrics.truststore"
- register: metrics_truststore
- check_mode: no
-
-- block:
- - slurp: src={{ mktemp.stdout }}/hawkular-metrics-keystore.pwd
- register: metrics_keystore_password
-
- - fetch:
- dest: "{{local_tmp.stdout}}/"
- src: "{{ mktemp.stdout }}/{{item}}"
- flat: yes
- changed_when: False
- with_items:
- - hawkular-metrics.pkcs12
- - hawkular-metrics.crt
- - ca.crt
-
- - local_action: command {{role_path}}/files/import_jks_certs.sh
- environment:
- CERT_DIR: "{{local_tmp.stdout}}"
- METRICS_KEYSTORE_PASSWD: "{{metrics_keystore_password.content}}"
- METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}"
- changed_when: False
-
- - copy:
- dest: "{{mktemp.stdout}}/"
- src: "{{item}}"
- with_fileglob: "{{local_tmp.stdout}}/*.*store"
-
- when: not metrics_keystore.stat.exists or
- not metrics_truststore.stat.exists
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index c490bcdd3..d13b96be1 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -20,7 +20,7 @@
- set_fact:
heapster_sa_secrets: "{{ heapster_sa_secrets + [item] }}"
with_items:
- - hawkular-metrics-certificate
+ - hawkular-metrics-certs
- hawkular-metrics-account
when: "not {{ openshift_metrics_heapster_standalone | bool }}"
@@ -41,6 +41,8 @@
- {port: 80, targetPort: http-endpoint}
selector:
name: "{{obj_name}}"
+ annotations:
+ service.alpha.openshift.io/serving-cert-secret-name: heapster-certs
labels:
metrics-infra: "{{obj_name}}"
name: "{{obj_name}}"
@@ -64,4 +66,4 @@
namespace: "{{ openshift_metrics_project }}"
changed_when: no
-- include: generate_heapster_certificates.yaml
+- include: generate_heapster_secrets.yaml
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index 361378df3..401db4e58 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -40,24 +40,20 @@ spec:
- "-Dhawkular.metrics.cassandra.nodes=hawkular-cassandra"
- "-Dhawkular.metrics.cassandra.use-ssl"
- "-Dhawkular.metrics.openshift.auth-methods=openshift-oauth,htpasswd"
- - "-Dhawkular.metrics.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file"
+ - "-Dhawkular.metrics.openshift.htpasswd-file=/hawkular-account/hawkular-metrics.htpasswd"
- "-Dhawkular.metrics.allowed-cors-access-control-allow-headers=authorization"
- "-Dhawkular.metrics.default-ttl={{openshift_metrics_duration}}"
- "-Dhawkular.metrics.admin-tenant=_hawkular_admin"
- "-Dhawkular-alerts.cassandra-nodes=hawkular-cassandra"
- "-Dhawkular-alerts.cassandra-use-ssl"
- "-Dhawkular.alerts.openshift.auth-methods=openshift-oauth,htpasswd"
- - "-Dhawkular.alerts.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file"
+ - "-Dhawkular.alerts.openshift.htpasswd-file=/hawkular-account/hawkular-metrics.htpasswd"
- "-Dhawkular.alerts.allowed-cors-access-control-allow-headers=authorization"
- "-Dorg.apache.tomcat.util.buf.UDecoder.ALLOW_ENCODED_SLASH=true"
- "-Dorg.apache.catalina.connector.CoyoteAdapter.ALLOW_BACKSLASH=true"
- "-Dcom.datastax.driver.FORCE_NIO=true"
- "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}"
- "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}"
- - "--hmw.keystore=/secrets/hawkular-metrics.keystore"
- - "--hmw.truststore=/secrets/hawkular-metrics.truststore"
- - "--hmw.keystore_password_file=/secrets/hawkular-metrics.keystore.password"
- - "--hmw.truststore_password_file=/secrets/hawkular-metrics.truststore.password"
env:
- name: POD_NAMESPACE
valueFrom:
@@ -67,6 +63,8 @@ spec:
value: "{{ openshift_metrics_master_url }}"
- name: JGROUPS_PASSWORD
value: "{{ 17 | oo_random_word }}"
+ - name: TRUSTSTORE_AUTHORITIES
+ value: "/hawkular-metrics-certs/tls.truststore.crt"
- name: OPENSHIFT_KUBE_PING_NAMESPACE
valueFrom:
fieldRef:
@@ -76,10 +74,10 @@ spec:
- name: STARTUP_TIMEOUT
value: "{{ openshift_metrics_startup_timeout }}"
volumeMounts:
- - name: hawkular-metrics-secrets
- mountPath: "/secrets"
- - name: hawkular-metrics-client-secrets
- mountPath: "/client-secrets"
+ - name: hawkular-metrics-certs
+ mountPath: "/hawkular-metrics-certs"
+ - name: hawkular-metrics-account
+ mountPath: "/hawkular-account"
{% if ((openshift_metrics_hawkular_limits_cpu is defined and openshift_metrics_hawkular_limits_cpu is not none)
or (openshift_metrics_hawkular_limits_memory is defined and openshift_metrics_hawkular_limits_memory is not none)
or (openshift_metrics_hawkular_requests_cpu is defined and openshift_metrics_hawkular_requests_cpu is not none)
@@ -118,9 +116,9 @@ spec:
command:
- "/opt/hawkular/scripts/hawkular-metrics-liveness.py"
volumes:
- - name: hawkular-metrics-secrets
+ - name: hawkular-metrics-certs
secret:
- secretName: hawkular-metrics-secrets
- - name: hawkular-metrics-client-secrets
+ secretName: hawkular-metrics-certs
+ - name: hawkular-metrics-account
secret:
secretName: hawkular-metrics-account
diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2
index 7c837db4d..ab998c2fb 100644
--- a/roles/openshift_metrics/templates/heapster.j2
+++ b/roles/openshift_metrics/templates/heapster.j2
@@ -34,24 +34,24 @@ spec:
- "heapster-wrapper.sh"
- "--wrapper.allowed_users_file=/secrets/heapster.allowed-users"
- "--source=kubernetes.summary_api:${MASTER_URL}?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250"
- - "--tls_cert=/secrets/heapster.cert"
- - "--tls_key=/secrets/heapster.key"
- - "--tls_client_ca=/secrets/heapster.client-ca"
+ - "--tls_cert=/heapster-certs/tls.crt"
+ - "--tls_key=/heapster-certs/tls.key"
+ - "--tls_client_ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- "--allowed_users=%allowed_users%"
- "--metric_resolution={{openshift_metrics_resolution}}"
{% if not openshift_metrics_heapster_standalone %}
- "--wrapper.username_file=/hawkular-account/hawkular-metrics.username"
- "--wrapper.password_file=/hawkular-account/hawkular-metrics.password"
- "--wrapper.endpoint_check=https://hawkular-metrics:443/hawkular/metrics/status"
- - "--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId={{openshift_metrics_node_id}}&caCert=/hawkular-cert/hawkular-metrics-ca.certificate&user=%username%&pass=%password%&filter=label(container_name:^system.slice.*|^user.slice)"
+ - "--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId={{openshift_metrics_node_id}}&caCert=/hawkular-metrics-certs/tls.crt&user=%username%&pass=%password%&filter=label(container_name:^system.slice.*|^user.slice)"
{% endif %}
env:
- name: STARTUP_TIMEOUT
value: "{{ openshift_metrics_startup_timeout }}"
-{% if ((openshift_metrics_heapster_limits_cpu is defined and openshift_metrics_heapster_limits_cpu is not none)
+{% if ((openshift_metrics_heapster_limits_cpu is defined and openshift_metrics_heapster_limits_cpu is not none)
or (openshift_metrics_heapster_limits_memory is defined and openshift_metrics_heapster_limits_memory is not none)
or (openshift_metrics_heapster_requests_cpu is defined and openshift_metrics_heapster_requests_cpu is not none)
- or (openshift_metrics_heapster_requests_memory is defined and openshift_metrics_heapster_requests_memory is not none))
+ or (openshift_metrics_heapster_requests_memory is defined and openshift_metrics_heapster_requests_memory is not none))
%}
resources:
{% if (openshift_metrics_heapster_limits_cpu is not none
@@ -65,8 +65,8 @@ spec:
memory: "{{openshift_metrics_heapster_limits_memory}}"
{% endif %}
{% endif %}
-{% if (openshift_metrics_heapster_requests_cpu is not none
- or openshift_metrics_heapster_requests_memory is not none)
+{% if (openshift_metrics_heapster_requests_cpu is not none
+ or openshift_metrics_heapster_requests_memory is not none)
%}
requests:
{% if openshift_metrics_heapster_requests_cpu is not none %}
@@ -80,9 +80,11 @@ spec:
volumeMounts:
- name: heapster-secrets
mountPath: "/secrets"
+ - name: heapster-certs
+ mountPath: "/heapster-certs"
{% if not openshift_metrics_heapster_standalone %}
- - name: hawkular-metrics-certificate
- mountPath: "/hawkular-cert"
+ - name: hawkular-metrics-certs
+ mountPath: "/hawkular-metrics-certs"
- name: hawkular-metrics-account
mountPath: "/hawkular-account"
readinessProbe:
@@ -94,10 +96,13 @@ spec:
- name: heapster-secrets
secret:
secretName: heapster-secrets
+ - name: heapster-certs
+ secret:
+ secretName: heapster-certs
{% if not openshift_metrics_heapster_standalone %}
- - name: hawkular-metrics-certificate
+ - name: hawkular-metrics-certs
secret:
- secretName: hawkular-metrics-certificate
+ secretName: hawkular-metrics-certs
- name: hawkular-metrics-account
secret:
secretName: hawkular-metrics-account
diff --git a/roles/openshift_metrics/templates/service.j2 b/roles/openshift_metrics/templates/service.j2
index 8df89127b..ce0bc2eec 100644
--- a/roles/openshift_metrics/templates/service.j2
+++ b/roles/openshift_metrics/templates/service.j2
@@ -2,6 +2,12 @@ apiVersion: "v1"
kind: "Service"
metadata:
name: "{{obj_name}}"
+{% if annotations is defined%}
+ annotations:
+{% for key, value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
{% if labels is defined%}
labels:
{% for key, value in labels.iteritems() %}
diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml
index f28c3ce48..b20957550 100644
--- a/roles/openshift_metrics/vars/openshift-enterprise.yml
+++ b/roles/openshift_metrics/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default(openshift_release | default ('3.5.0') ) }}"
+__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('3.6.0') }}"
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 626248306..98139cac2 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -34,6 +34,38 @@
dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
env_vars: "{{ openshift_node_env_vars | default(None) }}"
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+ command: grep "^[^#].*swap" /etc/fstab
+ # grep: match any lines which don't begin with '#' and contain 'swap'
+ changed_when: false
+ failed_when: false
+ register: swap_result
+
+# Disable Swap Block
+- block:
+
+ - name: Disable swap
+ command: swapoff --all
+
+ - name: Remove swap entries from /etc/fstab
+ replace:
+ dest: /etc/fstab
+ regexp: '(^[^#].*swap.*)'
+ replace: '# \1'
+ backup: yes
+
+ - name: Add notice about disabling swap
+ lineinfile:
+ dest: /etc/fstab
+ line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+ state: present
+
+ when:
+ - swap_result.stdout_lines | length > 0
+ - openshift_disable_swap | default(true)
+# End Disable Swap Block
+
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
- name: Install Node package
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
index cd2f362aa..2a36d8945 100644
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ b/roles/openshift_node_upgrade/meta/main.yml
@@ -10,4 +10,5 @@ galaxy_info:
versions:
- 7
dependencies:
+- role: lib_utils
- role: openshift_common
diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
index e91891ca9..416cf605a 100644
--- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
+++ b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
@@ -6,20 +6,6 @@
# - docker_version
# - skip_docker_restart
-# We need docker service up to remove all the images, but these services will keep
-# trying to re-start and thus re-pull the images we're trying to delete.
-- name: Stop containerized services
- service: name={{ item }} state=stopped
- with_items:
- - "{{ openshift.common.service_type }}-master"
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- - etcd_container
- - openvswitch
- failed_when: false
- when: openshift.common.is_containerized | bool
-
- name: Check Docker image count
shell: "docker images -aq | wc -l"
register: docker_image_count
@@ -45,5 +31,4 @@
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
-- include: restart.yml
- when: not skip_docker_restart | default(False) | bool
+# starting docker happens back in ../main.yml where it calls ../restart.yml
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index 6ae8dbc12..e725f4a5d 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -9,6 +9,28 @@
# - openshift_release
# tasks file for openshift_node_upgrade
+
+- name: Stop node and openvswitch services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-node"
+ - openvswitch
+ failed_when: false
+
+- name: Stop additional containerized services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-master-api"
+ - etcd_container
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
- include: docker/upgrade.yml
vars:
# We will restart Docker ourselves after everything is ready:
@@ -16,7 +38,6 @@
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
- - not openshift.common.is_containerized | bool
- include: "{{ node_config_hook }}"
when: node_config_hook is defined
@@ -67,16 +88,6 @@
state: latest
when: not openshift.common.is_containerized | bool
-- name: Restart openvswitch
- systemd:
- name: openvswitch
- state: started
- when:
- - not openshift.common.is_containerized | bool
-
-# Mandatory Docker restart, ensure all containerized services are running:
-- include: docker/restart.yml
-
- name: Update oreg value
yedit:
src: "{{ openshift.common.config_base }}/node/node-config.yaml"
@@ -84,11 +95,40 @@
value: "{{ oreg_url }}"
when: oreg_url is defined
-- name: Restart rpm node service
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: restarted
- when: not openshift.common.is_containerized | bool
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+ command: grep "^[^#].*swap" /etc/fstab
+ # grep: match any lines which don't begin with '#' and contain 'swap'
+ changed_when: false
+ failed_when: false
+ register: swap_result
+
+ # Disable Swap Block
+- block:
+
+ - name: Disable swap
+ command: swapoff --all
+
+ - name: Remove swap entries from /etc/fstab
+ replace:
+ dest: /etc/fstab
+ regexp: '(^[^#].*swap.*)'
+ replace: '# \1'
+ backup: yes
+
+ - name: Add notice about disabling swap
+ lineinfile:
+ dest: /etc/fstab
+ line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+ state: present
+
+ when:
+ - swap_result.stdout_lines | length > 0
+ - openshift_disable_swap | default(true)
+ # End Disable Swap Block
+
+# Restart all services
+- include: restart.yml
- name: Wait for node to be ready
oc_obj:
diff --git a/roles/openshift_node_upgrade/tasks/docker/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index 176fc3c0b..a9fab74e1 100644
--- a/roles/openshift_node_upgrade/tasks/docker/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -12,7 +12,7 @@
openshift_facts:
role: docker
-- name: Restart containerized services
+- name: Start services
service: name={{ item }} state=started
with_items:
- etcd_container
@@ -22,7 +22,6 @@
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
- name: Wait for master API to come back online
wait_for:
diff --git a/roles/openshift_provisioners/README.md b/roles/openshift_provisioners/README.md
new file mode 100644
index 000000000..7449073e6
--- /dev/null
+++ b/roles/openshift_provisioners/README.md
@@ -0,0 +1,29 @@
+# OpenShift External Dynamic Provisioners
+
+## Required Vars
+* `openshift_provisioners_install_provisioners`: When `True`, the openshift_provisioners role installs any provisioner whose "master" var (e.g. `openshift_provisioners_efs`) is set to `True`. When `False`, it uninstalls those same provisioners.
+
+## Optional Vars
+* `openshift_provisioners_image_prefix`: The prefix for the provisioner images to use. Defaults to 'docker.io/openshift/origin-'.
+* `openshift_provisioners_image_version`: The image version for the provisioner images to use. Defaults to 'latest'.
+* `openshift_provisioners_project`: The namespace that provisioners will be installed in. Defaults to 'openshift-infra'.
+
+## AWS EFS
+
+### Prerequisites
+* An IAM user assigned the AmazonElasticFileSystemReadOnlyAccess policy (or better)
+* An EFS file system in your cluster's region
+* [Mount targets](http://docs.aws.amazon.com/efs/latest/ug/accessing-fs.html) and [security groups](http://docs.aws.amazon.com/efs/latest/ug/accessing-fs-create-security-groups.html) such that any node (in any zone in the cluster's region) can mount the EFS file system by its [File system DNS name](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html)
+
+### Required Vars
+* `openshift_provisioners_efs_fsid`: The [File system ID](http://docs.aws.amazon.com/efs/latest/ug/gs-step-two-create-efs-resources.html) of the EFS file system, e.g. fs-47a2c22e.
+* `openshift_provisioners_efs_region`: The Amazon EC2 region of the EFS file system.
+* `openshift_provisioners_efs_aws_access_key_id`: The AWS access key of the IAM user, used to check that the EFS file system specified actually exists.
+* `openshift_provisioners_efs_aws_secret_access_key`: The AWS secret access key of the IAM user, used to check that the EFS file system specified actually exists.
+
+### Optional Vars
+* `openshift_provisioners_efs`: When `True` the AWS EFS provisioner will be installed or uninstalled according to whether `openshift_provisioners_install_provisioners` is `True` or `False`, respectively. Defaults to `False`.
+* `openshift_provisioners_efs_path`: The path of the directory in the EFS file system in which the EFS provisioner will create a directory to back each PV it creates. It must exist and be mountable by the EFS provisioner. Defaults to '/persistentvolumes'.
+* `openshift_provisioners_efs_name`: The `provisioner` name that `StorageClasses` specify. Defaults to 'openshift.org/aws-efs'.
+* `openshift_provisioners_efs_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) used to select the nodes on which the pod will land.
+* `openshift_provisioners_efs_supplementalgroup`: The supplemental group to give the pod in case it is needed for permission to write to the EFS file system. Defaults to '65534'.
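
As a rough sketch of how the variables documented above might be wired together (the file system ID, region, and vaulted credential lookups below are placeholders, not values from this changeset):

```
# Inventory/group_vars sketch enabling the AWS EFS provisioner (placeholder values)
openshift_provisioners_install_provisioners: True
openshift_provisioners_efs: True
openshift_provisioners_efs_fsid: fs-47a2c22e                 # placeholder File system ID
openshift_provisioners_efs_region: us-east-1                 # placeholder EC2 region
openshift_provisioners_efs_aws_access_key_id: "{{ vault_efs_access_key_id }}"          # hypothetical vaulted var
openshift_provisioners_efs_aws_secret_access_key: "{{ vault_efs_secret_access_key }}"  # hypothetical vaulted var
openshift_provisioners_efs_path: /persistentvolumes          # default shown in the README above
```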
diff --git a/roles/openshift_provisioners/defaults/main.yaml b/roles/openshift_provisioners/defaults/main.yaml
new file mode 100644
index 000000000..a6f040831
--- /dev/null
+++ b/roles/openshift_provisioners/defaults/main.yaml
@@ -0,0 +1,12 @@
+---
+openshift_provisioners_install_provisioners: True
+openshift_provisioners_image_prefix: docker.io/openshift/origin-
+openshift_provisioners_image_version: latest
+
+openshift_provisioners_efs: False
+openshift_provisioners_efs_path: /persistentvolumes
+openshift_provisioners_efs_name: openshift.org/aws-efs
+openshift_provisioners_efs_nodeselector: ""
+openshift_provisioners_efs_supplementalgroup: '65534'
+
+openshift_provisioners_project: openshift-infra
diff --git a/roles/openshift_provisioners/meta/main.yaml b/roles/openshift_provisioners/meta/main.yaml
new file mode 100644
index 000000000..cb9278eb7
--- /dev/null
+++ b/roles/openshift_provisioners/meta/main.yaml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Provisioners
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
new file mode 100644
index 000000000..ac21a5e37
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
@@ -0,0 +1,19 @@
+---
+- name: Generate ClusterRoleBindings
+ template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-clusterrolebinding.yaml
+ vars:
+ acct_name: provisioners-{{item}}
+ obj_name: run-provisioners-{{item}}
+ labels:
+ provisioners-infra: support
+ crb_usernames: ["system:serviceaccount:{{openshift_provisioners_project}}:{{acct_name}}"]
+ subjects:
+ - kind: ServiceAccount
+ name: "{{acct_name}}"
+ namespace: "{{openshift_provisioners_project}}"
+ cr_name: "system:persistent-volume-provisioner"
+ with_items:
+ # TODO
+ - efs
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_provisioners/tasks/generate_secrets.yaml b/roles/openshift_provisioners/tasks/generate_secrets.yaml
new file mode 100644
index 000000000..e6cbb1bbf
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/generate_secrets.yaml
@@ -0,0 +1,14 @@
+---
+- name: Generate secret for efs
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-secret.yaml
+ vars:
+ name: efs
+ obj_name: "provisioners-efs"
+ labels:
+ provisioners-infra: support
+ secrets:
+ - {key: aws-access-key-id, value: "{{openshift_provisioners_efs_aws_access_key_id}}"}
+ - {key: aws-secret-access-key, value: "{{openshift_provisioners_efs_aws_secret_access_key}}"}
+ check_mode: no
+ changed_when: no
+ when: openshift_provisioners_efs | bool
diff --git a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
new file mode 100644
index 000000000..4fe0583ee
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
@@ -0,0 +1,12 @@
+---
+- name: Generating serviceaccounts
+ template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-sa.yaml
+ vars:
+ obj_name: provisioners-{{item}}
+ labels:
+ provisioners-infra: support
+ with_items:
+ # TODO
+ - efs
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
new file mode 100644
index 000000000..57279c665
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -0,0 +1,70 @@
+---
+- name: Check efs current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs
+ -o jsonpath='{.spec.replicas}' -n {{openshift_provisioners_project}}
+ register: efs_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Generate efs PersistentVolumeClaim
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "provisioners-efs"
+ size: "1Mi"
+ access_modes:
+ - "ReadWriteMany"
+ pv_selector:
+ provisioners-efs: efs
+ check_mode: no
+ changed_when: no
+
+- name: Generate efs PersistentVolume
+ template: src=pv.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pv.yaml
+ vars:
+ obj_name: "provisioners-efs"
+ size: "1Mi"
+ access_modes:
+ - "ReadWriteMany"
+ labels:
+ provisioners-efs: efs
+ volume_plugin: "nfs"
+ volume_source:
+ - {key: "server", value: "{{openshift_provisioners_efs_fsid}}.efs.{{openshift_provisioners_efs_region}}.amazonaws.com"}
+ - {key: "path", value: "{{openshift_provisioners_efs_path}}"}
+ claim_name: "provisioners-efs"
+ check_mode: no
+ changed_when: no
+
+- name: Generate efs DeploymentConfig
+ template:
+ src: efs.j2
+ dest: "{{ mktemp.stdout }}/templates/{{deploy_name}}-dc.yaml"
+ vars:
+ name: efs
+ deploy_name: "provisioners-efs"
+ deploy_serviceAccount: "provisioners-efs"
+ replica_count: "{{efs_replica_count.stdout | default(0)}}"
+ node_selector: "{{openshift_provisioners_efs_nodeselector | default('') }}"
+ claim_name: "provisioners-efs"
+ check_mode: no
+ changed_when: false
+
+# anyuid in order to run as root & chgrp shares with allocated gids
+- name: "Check efs anyuid permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get scc/anyuid -o jsonpath='{.users}'
+ register: efs_anyuid
+ check_mode: no
+ changed_when: no
+
+- name: "Set anyuid permissions for efs"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
+ register: efs_output
+ failed_when: "efs_output.rc == 1 and 'exists' not in efs_output.stderr"
+ check_mode: no
+ when: efs_anyuid.stdout.find("system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs") == -1
diff --git a/roles/openshift_provisioners/tasks/install_provisioners.yaml b/roles/openshift_provisioners/tasks/install_provisioners.yaml
new file mode 100644
index 000000000..324fdcc82
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/install_provisioners.yaml
@@ -0,0 +1,55 @@
+---
+- name: Check that EFS File System ID is set
+ fail: msg='the openshift_provisioners_efs_fsid variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_fsid is not defined
+
+- name: Check that EFS region is set
+ fail: msg='the openshift_provisioners_efs_region variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_region is not defined
+
+- name: Check that EFS AWS access key id is set
+ fail: msg='the openshift_provisioners_efs_aws_access_key_id variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_access_key_id is not defined
+
+- name: Check that EFS AWS secret access key is set
+ fail: msg='the openshift_provisioners_efs_aws_secret_access_key variable is required'
+ when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_secret_access_key is not defined
+
+- name: Install support
+ include: install_support.yaml
+
+- name: Install EFS
+ include: install_efs.yaml
+ when: openshift_provisioners_efs | bool
+
+- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+ register: object_def_files
+ changed_when: no
+
+- slurp: src={{item}}
+ register: object_defs
+ with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}"
+ changed_when: no
+
+- name: Create objects
+ include: oc_apply.yaml
+ vars:
+ - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ - namespace: "{{ openshift_provisioners_project }}"
+ - file_name: "{{ file.source }}"
+ - file_content: "{{ file.content | b64decode | from_yaml }}"
+ with_items: "{{ object_defs.results }}"
+ loop_control:
+ loop_var: file
+ when: not ansible_check_mode
+
+- name: Printing out objects to create
+ debug: msg={{file.content | b64decode }}
+ with_items: "{{ object_defs.results }}"
+ loop_control:
+ loop_var: file
+ when: ansible_check_mode
+
+- name: Scaling up cluster
+ include: start_cluster.yaml
+ when: start_cluster | default(true) | bool
diff --git a/roles/openshift_provisioners/tasks/install_support.yaml b/roles/openshift_provisioners/tasks/install_support.yaml
new file mode 100644
index 000000000..ba472f1c9
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/install_support.yaml
@@ -0,0 +1,24 @@
+---
+- name: Check whether the provisioners project already exists
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_provisioners_project}} --no-headers
+ register: provisioners_project_result
+ ignore_errors: yes
+ when: not ansible_check_mode
+ changed_when: no
+
+- name: Create provisioners project
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_provisioners_project}}
+ when: not ansible_check_mode and "not found" in provisioners_project_result.stderr
+
+- name: Create temp directory for all our templates
+ file: path={{mktemp.stdout}}/templates state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_secrets.yaml
+
+- include: generate_clusterrolebindings.yaml
+
+- include: generate_serviceaccounts.yaml
diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml
new file mode 100644
index 000000000..a50c78c97
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: Create temp directory for doing work in
+ command: mktemp -td openshift-provisioners-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift.common.config_base}}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+ tags: provisioners_init
+
+- include: "{{ role_path }}/tasks/install_provisioners.yaml"
+ when: openshift_provisioners_install_provisioners | default(false) | bool
+
+- include: "{{ role_path }}/tasks/uninstall_provisioners.yaml"
+ when: not openshift_provisioners_install_provisioners | default(false) | bool
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ tags: provisioners_cleanup
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
new file mode 100644
index 000000000..49d03f203
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -0,0 +1,51 @@
+---
+- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}}
+ register: generation_init
+ failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
+ changed_when: no
+
+- name: Applying {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: no
+
+- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}}
+ register: generation_changed
+ failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''"
+ changed_when: generation_changed.stdout | default (0) | int > generation_init.stdout | default(0) | int
+ when:
+ - "'field is immutable' not in generation_apply.stderr"
+
+- name: Removing previous {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ delete -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_delete
+ failed_when: "'error' in generation_delete.stderr"
+ changed_when: generation_delete.rc == 0
+ when: generation_apply.rc != 0
+
+- name: Recreating {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: generation_apply.rc == 0
+ when: generation_apply.rc != 0
diff --git a/roles/openshift_provisioners/tasks/start_cluster.yaml b/roles/openshift_provisioners/tasks/start_cluster.yaml
new file mode 100644
index 000000000..ee7f545a9
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/start_cluster.yaml
@@ -0,0 +1,20 @@
+---
+- name: Retrieve efs
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "provisioners-infra=efs"
+ namespace: "{{openshift_provisioners_project}}"
+ register: efs_dc
+ when: openshift_provisioners_efs | bool
+
+- name: start efs
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_provisioners_project}}"
+ replicas: 1
+ with_items: "{{ efs_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ loop_control:
+ loop_var: object
+ when: openshift_provisioners_efs | bool
diff --git a/roles/openshift_provisioners/tasks/stop_cluster.yaml b/roles/openshift_provisioners/tasks/stop_cluster.yaml
new file mode 100644
index 000000000..30b6b12c8
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/stop_cluster.yaml
@@ -0,0 +1,20 @@
+---
+- name: Retrieve efs
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "provisioners-infra=efs"
+ namespace: "{{openshift_provisioners_project}}"
+ register: efs_dc
+ when: openshift_provisioners_efs | bool
+
+- name: stop efs
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_provisioners_project}}"
+ replicas: 0
+ with_items: "{{ efs_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ loop_control:
+ loop_var: object
+ when: openshift_provisioners_efs | bool
diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
new file mode 100644
index 000000000..0be4bc7d2
--- /dev/null
+++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
@@ -0,0 +1,43 @@
+---
+- name: stop provisioners
+ include: stop_cluster.yaml
+
+# delete the deployment objects that we had created
+- name: delete provisioner api objects
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete {{ item }} --selector provisioners-infra -n {{ openshift_provisioners_project }} --ignore-not-found=true
+ with_items:
+ - dc
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our old secrets
+- name: delete provisioner secrets
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete secret {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
+ with_items:
+ - provisioners-efs
+ ignore_errors: yes
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterrolebindings {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
+ with_items:
+ - run-provisioners-efs
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our service accounts
+- name: delete service accounts
+ oc_serviceaccount:
+ name: "{{ item }}"
+ namespace: "{{ openshift_provisioners_project }}"
+ state: absent
+ with_items:
+ - provisioners-efs
diff --git a/roles/openshift_provisioners/templates/clusterrolebinding.j2 b/roles/openshift_provisioners/templates/clusterrolebinding.j2
new file mode 100644
index 000000000..994afa32d
--- /dev/null
+++ b/roles/openshift_provisioners/templates/clusterrolebinding.j2
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{obj_name}}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if crb_usernames is defined %}
+userNames:
+{% for name in crb_usernames %}
+ - {{ name }}
+{% endfor %}
+{% endif %}
+{% if crb_groupnames is defined %}
+groupNames:
+{% for name in crb_groupnames %}
+ - {{ name }}
+{% endfor %}
+{% endif %}
+subjects:
+{% for sub in subjects %}
+ - kind: {{ sub.kind }}
+ name: {{ sub.name }}
+ namespace: {{sub.namespace}}
+{% endfor %}
+roleRef:
+ name: {{cr_name}}
diff --git a/roles/openshift_provisioners/templates/efs.j2 b/roles/openshift_provisioners/templates/efs.j2
new file mode 100644
index 000000000..81b9ccca5
--- /dev/null
+++ b/roles/openshift_provisioners/templates/efs.j2
@@ -0,0 +1,58 @@
+kind: DeploymentConfig
+apiVersion: v1
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provisioners-infra: "{{name}}"
+ name: "{{name}}"
+spec:
+ replicas: {{replica_count}}
+ selector:
+ provisioners-infra: "{{name}}"
+ name: "{{name}}"
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provisioners-infra: "{{name}}"
+ name: "{{name}}"
+ spec:
+ serviceAccountName: "{{deploy_serviceAccount}}"
+{% if node_selector is iterable and node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - name: efs-provisioner
+ image: {{openshift_provisioners_image_prefix}}efs-provisioner:{{openshift_provisioners_image_version}}
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: provisioners-efs
+ key: aws-access-key-id
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: provisioners-efs
+ key: aws-secret-access-key
+ - name: FILE_SYSTEM_ID
+ value: "{{openshift_provisioners_efs_fsid}}"
+ - name: AWS_REGION
+ value: "{{openshift_provisioners_efs_region}}"
+ - name: PROVISIONER_NAME
+ value: "{{openshift_provisioners_efs_name}}"
+ volumeMounts:
+ - name: pv-volume
+ mountPath: /persistentvolumes
+ securityContext:
+ supplementalGroups:
+ - {{openshift_provisioners_efs_supplementalgroup}}
+ volumes:
+ - name: pv-volume
+ persistentVolumeClaim:
+ claimName: "{{claim_name}}"
diff --git a/roles/openshift_provisioners/templates/pv.j2 b/roles/openshift_provisioners/templates/pv.j2
new file mode 100644
index 000000000..f4128f9f0
--- /dev/null
+++ b/roles/openshift_provisioners/templates/pv.j2
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{obj_name}}
+{% if annotations is defined %}
+ annotations:
+{% for key,value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ capacity:
+ storage: {{size}}
+ accessModes:
+{% for mode in access_modes %}
+ - {{mode}}
+{% endfor %}
+ {{volume_plugin}}:
+{% for s in volume_source %}
+ {{s.key}}: {{s.value}}
+{% endfor %}
+{% if claim_name is defined%}
+ claimRef:
+ name: {{claim_name}}
+ namespace: {{openshift_provisioners_project}}
+{% endif %}
diff --git a/roles/openshift_provisioners/templates/pvc.j2 b/roles/openshift_provisioners/templates/pvc.j2
new file mode 100644
index 000000000..83d503056
--- /dev/null
+++ b/roles/openshift_provisioners/templates/pvc.j2
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{obj_name}}
+{% if annotations is defined %}
+ annotations:
+{% for key,value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+ accessModes:
+{% for mode in access_modes %}
+ - {{mode}}
+{% endfor %}
+ resources:
+ requests:
+ storage: {{size}}
+
diff --git a/roles/openshift_provisioners/templates/secret.j2 b/roles/openshift_provisioners/templates/secret.j2
new file mode 100644
index 000000000..78824095b
--- /dev/null
+++ b/roles/openshift_provisioners/templates/secret.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{obj_name}}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+type: Opaque
+data:
+{% for s in secrets %}
+ "{{s.key}}" : "{{s.value | b64encode}}"
+{% endfor %}
diff --git a/roles/openshift_provisioners/templates/serviceaccount.j2 b/roles/openshift_provisioners/templates/serviceaccount.j2
new file mode 100644
index 000000000..b22acc594
--- /dev/null
+++ b/roles/openshift_provisioners/templates/serviceaccount.j2
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{obj_name}}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if secrets is defined %}
+secrets:
+{% for name in secrets %}
+- name: {{ name }}
+{% endfor %}
+{% endif %}
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
index 95b155b29..abd1997dd 100644
--- a/roles/openshift_repos/README.md
+++ b/roles/openshift_repos/README.md
@@ -12,10 +12,10 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------|---------------|----------------------------------------------|
-| openshift_deployment_type | None | Possible values enterprise, origin, online |
-| openshift_additional_repos | {} | TODO |
+| Name | Default value | |
+|-------------------------------|---------------|------------------------------------|
+| openshift_deployment_type | None | Possible values enterprise, origin |
+| openshift_additional_repos | {} | TODO |
Dependencies
------------
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index fc562c42c..f15dc16d1 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -1,6 +1,18 @@
---
+- name: Abort when conflicting deployment type variables are set
+ when:
+ - deployment_type is defined
+ - openshift_deployment_type is defined
+ - openshift_deployment_type != deployment_type
+ fail:
+ msg: |-
+ openshift_deployment_type is set to "{{ openshift_deployment_type }}".
+ deployment_type is set to "{{ deployment_type }}".
+ To avoid unexpected results, this conflict is not allowed.
+ deployment_type is deprecated in favor of openshift_deployment_type.
+ Please specify only openshift_deployment_type, or make both the same.
+
- name: Standardize on latest variable names
- no_log: True # keep task description legible
set_fact:
# goal is to deprecate deployment_type in favor of openshift_deployment_type.
# both will be accepted for now, but code should refer to the new name.
@@ -8,8 +20,15 @@
deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
+- name: Abort when deployment type is invalid
+ # this variable is required; complain early and clearly if it is invalid.
+ when: openshift_deployment_type not in known_openshift_deployment_types
+ fail:
+ msg: |-
+ Please set openshift_deployment_type to one of:
+ {{ known_openshift_deployment_types | join(', ') }}
+
- name: Normalize openshift_release
- no_log: True # keep task description legible
set_fact:
# Normalize release if provided, e.g. "v3.5" => "3.5"
# Currently this is not required to be defined for all installs, and the
@@ -19,10 +38,11 @@
openshift_release: "{{ openshift_release | string | regex_replace('^v', '') }}"
when: openshift_release is defined
-- name: Ensure a valid deployment type has been given.
- # this variable is required; complain early and clearly if it is invalid.
- when: openshift_deployment_type not in known_openshift_deployment_types
+- name: Abort when openshift_release is invalid
+ when:
+ - openshift_release is defined
+ - not openshift_release | match('\d+(\.\d+){1,3}$')
fail:
msg: |-
- Please set openshift_deployment_type to one of:
- {{ known_openshift_deployment_types | join(', ') }}
+ openshift_release is "{{ openshift_release }}" which is not a valid version string.
+ Please set it to a version string like "3.4".
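
As a minimal sketch of inventory values that satisfy the checks introduced above (assuming `origin` remains among `known_openshift_deployment_types`, as used elsewhere in this diff):

```
# Passes both the deployment type and release format checks
openshift_deployment_type: origin   # avoid also setting a conflicting deployment_type
openshift_release: v3.4             # leading "v" is stripped, leaving "3.4", which matches \d+(\.\d+){1,3}$
```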
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
new file mode 100644
index 000000000..cf0fb94c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -0,0 +1,60 @@
+OpenShift GlusterFS Cluster
+===========================
+
+OpenShift GlusterFS Cluster Installation
+
+Requirements
+------------
+
+* Ansible 2.2
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value | |
+|--------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout               | 300                     | Seconds to wait for pods to become ready |
+| openshift_storage_glusterfs_namespace             | 'default'               | Namespace in which to create GlusterFS resources |
+| openshift_storage_glusterfs_is_native             | True                    | GlusterFS should be containerized |
+| openshift_storage_glusterfs_nodeselector          | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode |
+| openshift_storage_glusterfs_image                 | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' |
+| openshift_storage_glusterfs_version               | 'latest'                | Container image version to use for GlusterFS pods |
+| openshift_storage_glusterfs_wipe                  | False                   | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.** |
+| openshift_storage_glusterfs_heketi_is_native      | True                    | heketi should be containerized |
+| openshift_storage_glusterfs_heketi_image          | 'heketi/heketi'         | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7' |
+| openshift_storage_glusterfs_heketi_version        | 'latest'                | Container image version to use for heketi pods |
+| openshift_storage_glusterfs_heketi_admin_key      | ''                      | String to use as secret key for performing heketi commands as admin |
+| openshift_storage_glusterfs_heketi_user_key       | ''                      | String to use as secret key for performing heketi commands as user that can only view or modify volumes |
+| openshift_storage_glusterfs_heketi_topology_load  | True                    | Load the GlusterFS topology information into heketi |
+| openshift_storage_glusterfs_heketi_url            | Undefined               | URL for the heketi REST API, dynamically determined in native mode |
+| openshift_storage_glusterfs_heketi_wipe           | False                   | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` |
+
+Dependencies
+------------
+
+* os_firewall
+* openshift_hosted_facts
+* openshift_repos
+* lib_openshift
+
+Example Playbook
+----------------
+
+```
+- name: Configure GlusterFS hosts
+ hosts: oo_first_master
+ roles:
+ - role: openshift_storage_glusterfs
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jose A. Rivera (jarrpa@redhat.com)
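
A sketch of overriding a few of the documented variables via inventory vars alongside the example playbook above; the namespace value and the vaulted key lookup are illustrative assumptions:

```
openshift_storage_glusterfs_namespace: glusterfs                  # illustrative alternative to 'default'
openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs'
openshift_storage_glusterfs_wipe: False                           # True destroys existing GlusterFS data
openshift_storage_glusterfs_heketi_admin_key: "{{ vault_heketi_admin_key }}"  # hypothetical vaulted secret
```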
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
new file mode 100644
index 000000000..ade850747
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -0,0 +1,17 @@
+---
+openshift_storage_glusterfs_timeout: 300
+openshift_storage_glusterfs_namespace: 'default'
+openshift_storage_glusterfs_is_native: True
+openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}"
+openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
+openshift_storage_glusterfs_version: 'latest'
+openshift_storage_glusterfs_wipe: False
+openshift_storage_glusterfs_heketi_is_native: True
+openshift_storage_glusterfs_heketi_is_missing: True
+openshift_storage_glusterfs_heketi_deploy_is_missing: True
+openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
+openshift_storage_glusterfs_heketi_version: 'latest'
+openshift_storage_glusterfs_heketi_admin_key: ''
+openshift_storage_glusterfs_heketi_user_key: ''
+openshift_storage_glusterfs_heketi_topology_load: True
+openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
new file mode 100644
index 000000000..c9945be13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -0,0 +1,115 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+labels:
+ template: deploy-heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ name: deploy-heketi
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ name: deploy-heketi
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ name: deploy-heketi
+ glusterfs: deploy-heketi-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-service-account
+ containers:
+ - name: deploy-heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: kubernetes
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+ displayName: heketi container name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml
new file mode 100644
index 000000000..3f8d8f507
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-registry-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
new file mode 100644
index 000000000..c66705752
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
@@ -0,0 +1,128 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs
+ labels:
+ glusterfs: daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs-node: pod
+ template:
+ metadata:
+ name: glusterfs
+ labels:
+ glusterfs-node: pod
+ spec:
+ nodeSelector:
+ storagenode: glusterfs
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 100
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 100
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+ displayName: GlusterFS container name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
new file mode 100644
index 000000000..df045c170
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -0,0 +1,113 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+labels:
+ template: heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-route
+ spec:
+ to:
+ kind: Service
+ name: heketi
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-pod
+ spec:
+ serviceAccountName: heketi-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: kubernetes
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-storage-endpoints
+ path: heketidbstorage
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+ displayName: heketi container name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
new file mode 100644
index 000000000..88801e487
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
@@ -0,0 +1,23 @@
+'''
+ Custom Ansible filters used by the openshift_storage_glusterfs role
+'''
+
+
+def map_from_pairs(source, delim="="):
+ ''' Return a dict parsed from a comma-separated string of delim-separated pairs,
+ e.g. 'a=b,c=d' -> {'a': 'b', 'c': 'd'} '''
+ if source == '':
+ return dict()
+
+ return dict(item.split(delim) for item in source.split(","))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+ ''' OpenShift Storage GlusterFS Filters '''
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ ''' Returns the names of the filters provided by this class '''
+ return {
+ 'map_from_pairs': map_from_pairs
+ }
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
new file mode 100644
index 000000000..aab9851f9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Jose A. Rivera
+ description: OpenShift GlusterFS Cluster
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- role: openshift_hosted_facts
+- role: openshift_repos
+- role: lib_openshift
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
new file mode 100644
index 000000000..2b35e5137
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -0,0 +1,107 @@
+---
+- assert:
+ that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1"
+ msg: Only one GlusterFS nodeselector key/value pair should be provided
+
+- assert:
+ that: "groups.oo_glusterfs_to_config | count >= 3"
+ msg: There must be at least three GlusterFS nodes specified
+
+- name: Delete pre-existing GlusterFS resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "template,daemonset"
+ name: glusterfs
+ state: absent
+ when: openshift_storage_glusterfs_wipe
+
+- name: Unlabel any existing GlusterFS nodes
+ oc_label:
+ name: "{{ item }}"
+ kind: node
+ state: absent
+ labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ with_items: "{{ groups.all }}"
+ when: openshift_storage_glusterfs_wipe
+
+- name: Delete pre-existing GlusterFS config
+ file:
+ path: /var/lib/glusterd
+ state: absent
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
+ when: openshift_storage_glusterfs_wipe
+
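+# pvdisplay reports, for each configured storage device, whether it is an LVM
+# physical volume and which volume group (if any) it belongs to, so the wipe
+# task below knows what to remove.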
+- name: Get GlusterFS storage devices state
+ command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
+ register: devices_info
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
+ failed_when: False
+ when: openshift_storage_glusterfs_wipe
+
+ # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+- name: Clear GlusterFS storage device contents
+ shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+ delegate_to: "{{ item.item }}"
+ with_items: "{{ devices_info.results }}"
+ when:
+ - openshift_storage_glusterfs_wipe
+ - item.stdout_lines | count > 0
+
+- name: Add service accounts to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ with_items:
+ - 'default'
+ - 'router'
+
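+# The DaemonSet in glusterfs-template.yml schedules its pods only on nodes that
+# carry the nodeselector label, so label every GlusterFS host accordingly.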
+- name: Label GlusterFS nodes
+ oc_label:
+ name: "{{ glusterfs_host }}"
+ kind: node
+ state: add
+ labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
+ loop_control:
+ loop_var: glusterfs_host
+
+- name: Copy GlusterFS DaemonSet template
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-template.yml"
+ dest: "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Create GlusterFS template
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: template
+ name: glusterfs
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Deploy GlusterFS pods
+ oc_process:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ template_name: "glusterfs"
+ create: True
+ params:
+ IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}"
+ IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}"
+
+- name: Wait for GlusterFS pods
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs-node=pod"
+ register: glusterfs_pods
+ until:
+ - "glusterfs_pods.results.results[0]['items'] | count > 0"
+ # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
+ - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
new file mode 100644
index 000000000..9f092d5d5
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -0,0 +1,48 @@
+---
+- name: Delete pre-existing GlusterFS registry resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "svc,ep"
+ name: "glusterfs-registry-endpoints"
+ failed_when: False
+
+- name: Generate GlusterFS registry endpoints
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Copy GlusterFS registry service
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml"
+ dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Create GlusterFS registry endpoints
+ oc_obj:
+ namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ state: present
+ kind: endpoints
+ name: glusterfs-registry-endpoints
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Create GlusterFS registry service
+ oc_obj:
+ namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ state: present
+ kind: service
+ name: glusterfs-registry-endpoints
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
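+# The volume create task below is skipped if a volume with the registry's
+# configured path already appears in the `heketi-cli volume list` output,
+# keeping this play idempotent.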
+- name: Check if GlusterFS registry volume exists
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list"
+ register: registry_volume
+
+- name: Create GlusterFS registry volume
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+ when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
new file mode 100644
index 000000000..76ae1db75
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -0,0 +1,41 @@
+---
+- name: Copy initial heketi resource files
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "deploy-heketi-template.yml"
+
+- name: Create deploy-heketi resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: template
+ name: deploy-heketi
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/deploy-heketi-template.yml"
+
+- name: Deploy deploy-heketi pod
+ oc_process:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ template_name: "deploy-heketi"
+ create: True
+ params:
+ IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+ HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for deploy-heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ register: heketi_pod
+ until:
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
new file mode 100644
index 000000000..84b85e95d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -0,0 +1,109 @@
+---
+- name: Create heketi DB volume
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+ register: setup_storage
+ failed_when: False
+
+# This is used in the subsequent task
+- name: Copy the admin client config
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+
+# Need `command` here because heketi-storage.json contains multiple objects.
+- name: Copy heketi DB to GlusterFS volume
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}"
+ when: "setup_storage.rc == 0"
+
+- name: Wait for copy job to finish
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: job
+ state: list
+ name: "heketi-storage-copy-job"
+ register: heketi_job
+ until:
+ - "'results' in heketi_job.results and heketi_job.results.results | count > 0"
+ # Pod's 'Complete' status must be True
+ - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ failed_when:
+ - "'results' in heketi_job.results"
+ - "heketi_job.results.results | count > 0"
+ # Fail when pod's 'Failed' status is True
+ - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+ when: "setup_storage.rc == 0"
+
+- name: Delete deploy resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,jobs,dc,secret"
+ selector: "deploy-heketi"
+ failed_when: False
+
+- name: Copy heketi template
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
+ dest: "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Create heketi resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: template
+ name: heketi
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Deploy heketi pod
+ oc_process:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ template_name: "heketi"
+ create: True
+ params:
+ IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+ HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ until:
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
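+# The heketi service's endpoints object supplies the pod IP and port used to
+# build the heketi URL for the heketi-cli commands that follow.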
+- name: Determine heketi URL
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: ep
+ selector: "glusterfs=heketi-service"
+ register: heketi_url
+ until:
+ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+ - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
+- name: Set heketi URL
+ set_fact:
+ openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+
+- name: Verify heketi service
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+ changed_when: False
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
new file mode 100644
index 000000000..265a3cc6e
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -0,0 +1,182 @@
+---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- name: Verify target namespace exists
+ oc_project:
+ state: present
+ name: "{{ openshift_storage_glusterfs_namespace }}"
+ when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native
+
+- include: glusterfs_deploy.yml
+ when: openshift_storage_glusterfs_is_native
+
+- name: Make sure heketi-client is installed
+ package: name=heketi-client state=present
+
+- name: Delete pre-existing heketi resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,jobs,dc,secret"
+ selector: "deploy-heketi"
+ - kind: "template,route,dc,service"
+ name: "heketi"
+ - kind: "svc,ep"
+ name: "heketi-storage-endpoints"
+ - kind: "sa"
+ name: "heketi-service-account"
+ failed_when: False
+ when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Wait for deploy-heketi pods to terminate
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Wait for heketi pods to terminate
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Create heketi service account
+ oc_serviceaccount:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ name: heketi-service-account
+ state: present
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Add heketi service account to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Allow heketi service account to view/edit pods
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+ resource_kind: role
+ resource_name: edit
+ state: present
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check for existing deploy-heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ register: heketi_pod
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check if need to deploy deploy-heketi
+ set_fact:
+ openshift_storage_glusterfs_heketi_deploy_is_missing: False
+ when:
+ - "openshift_storage_glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Check for existing heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check if need to deploy heketi
+ set_fact:
+ openshift_storage_glusterfs_heketi_is_missing: False
+ when:
+ - "openshift_storage_glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- include: heketi_deploy_part1.yml
+ when:
+ - openshift_storage_glusterfs_heketi_is_native
+ - openshift_storage_glusterfs_heketi_deploy_is_missing
+ - openshift_storage_glusterfs_heketi_is_missing
+
+- name: Determine heketi URL
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: ep
+ selector: "glusterfs in (deploy-heketi-service, heketi-service)"
+ register: heketi_url
+ until:
+ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+ - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ when:
+ - openshift_storage_glusterfs_heketi_is_native
+ - openshift_storage_glusterfs_heketi_url is undefined
+
+- name: Set heketi URL
+ set_fact:
+ openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ when:
+ - openshift_storage_glusterfs_heketi_is_native
+ - openshift_storage_glusterfs_heketi_url is undefined
+
+- name: Verify heketi service
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+ changed_when: False
+
+- name: Generate topology file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+ dest: "{{ mktemp.stdout }}/topology.json"
+ when:
+ - openshift_storage_glusterfs_is_native
+ - openshift_storage_glusterfs_heketi_topology_load
+
+- name: Load heketi topology
+ shell: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+ register: topology_load
+ failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+ when:
+ - openshift_storage_glusterfs_is_native
+ - openshift_storage_glusterfs_heketi_topology_load
+
+- include: heketi_deploy_part2.yml
+ when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing
+
+- include: glusterfs_registry.yml
+ when: "openshift.hosted.registry.storage.kind == 'glusterfs'"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..d72d085c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-registry-endpoints
+subsets:
+- addresses:
+{% for node in groups.oo_glusterfs_to_config %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
new file mode 100644
index 000000000..eb5b4544f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
@@ -0,0 +1,39 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in groups.oo_glusterfs_to_config -%}
+ {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+ "{{ hostvars[node].glusterfs_hostname | default(hostvars[node].openshift.common.hostname) }}"
+ ],
+ "storage": [
+ "{{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}"
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c3d001bb4..fa9b20e92 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -7,8 +7,13 @@
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
# be used by default. Users must indicate what they want.
-- fail:
- msg: "Must specify openshift_release or openshift_image_tag in inventory to install origin. (suggestion: add openshift_release=\"1.2\" to inventory)"
+- name: Abort when we cannot safely guess what Origin image version the user wanted
+ fail:
+ msg: |-
+ To install a containerized Origin release, you must set openshift_release or
+ openshift_image_tag in your inventory to specify which version of the OpenShift
+ component images to use. You may want the latest (usually alpha) releases or
+ a more stable release. (Suggestion: add openshift_release="x.y" to inventory.)
when:
- is_containerized | bool
- openshift.common.deployment_type == 'origin'
@@ -27,7 +32,10 @@
when: openshift_release is defined
# Verify that the image tag is in a valid format
-- block:
+- when:
+ - openshift_image_tag is defined
+ - openshift_image_tag != "latest"
+ block:
# Verifies that when the deployment type is origin the version:
# - starts with a v
@@ -35,12 +43,14 @@
# It also allows for optional trailing data which:
# - must start with a dash
# - may contain numbers, letters, dashes and dots.
- - name: Verify Origin openshift_image_tag is valid
+ - name: (Origin) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'origin'
assert:
that:
- "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
- msg: "openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1"
- when: openshift.common.deployment_type == 'origin'
+ msg: |-
+ openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
+ You specified openshift_image_tag={{ openshift_image_tag }}
# Verifies that when the deployment type is openshift-enterprise the version:
# - starts with a v
@@ -48,16 +58,14 @@
# It also allows for optional trailing data which:
# - must start with a dash
# - may contain numbers
- - name: Verify Enterprise openshift_image_tag is valid
+ - name: (Enterprise) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'openshift-enterprise'
assert:
that:
- "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
- msg: "openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4"
- when: openshift.common.deployment_type == 'openshift-enterprise'
-
- when:
- - openshift_image_tag is defined
- - openshift_image_tag != "latest"
+ msg: |-
+ openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4
+ You specified openshift_image_tag={{ openshift_image_tag }}
# Make sure we copy this to a fact if given a var:
- set_fact:
@@ -119,30 +127,42 @@
- fail:
msg: openshift_version role was unable to set openshift_version
+ name: Abort if openshift_version was not set
when: openshift_version is not defined
- fail:
msg: openshift_version role was unable to set openshift_image_tag
+ name: Abort if openshift_image_tag was not set
when: openshift_image_tag is not defined
- fail:
msg: openshift_version role was unable to set openshift_pkg_version
+ name: Abort if openshift_pkg_version was not set
when: openshift_pkg_version is not defined
- fail:
- msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories."
+ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+ name: Abort if no OpenShift version is available for an RPM install
when:
- not is_containerized | bool
- openshift_version == '0.0'
-# We can't map an openshift_release to full rpm version like we can with containers, make sure
+# We can't map an openshift_release to full rpm version like we can with containers; make sure
# the rpm version we looked up matches the release requested and error out if not.
-- fail:
- msg: "Detected OpenShift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories, inventory, or run the appropriate OpenShift upgrade playbook."
+- name: For an RPM install, abort when the release requested does not match the available version.
when:
- not is_containerized | bool
- openshift_release is defined
- - not openshift_version.startswith(openshift_release) | bool
+ assert:
+ that:
+ - openshift_version.startswith(openshift_release) | bool
+ msg: |-
+ You requested openshift_release {{ openshift_release }}, which is not matched by
+ the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ on host {{ inventory_hostname }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
# The end result of these three variables is quite important so make sure they are displayed and logged:
- debug: var=openshift_release