Diffstat (limited to 'roles')
-rw-r--r--  roles/etcd/tasks/system_container.yml | 72
-rw-r--r--  roles/lib_openshift/library/oadm_manage_node.py | 155
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 1544
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 2612
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 3071
-rw-r--r--  roles/lib_openshift/library/oc_atomic_container.py | 203
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 155
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 170
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 155
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 156
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 155
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 163
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 177
-rw-r--r--  roles/lib_openshift/library/oc_sdnvalidator.py | 1387
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 162
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 166
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 157
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 157
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 155
-rw-r--r--  roles/lib_openshift/meta/main.yml | 15
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py | 35
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_registry.py | 47
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_router.py | 69
-rw-r--r--  roles/lib_openshift/src/ansible/oc_atomic_container.py | 137
-rw-r--r--  roles/lib_openshift/src/ansible/oc_sdnvalidator.py | 24
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_ca_server_cert.py | 135
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_registry.py | 421
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_router.py | 500
-rw-r--r--  roles/lib_openshift/src/class/oc_obj.py | 1
-rw-r--r--  roles/lib_openshift/src/class/oc_route.py | 8
-rw-r--r--  roles/lib_openshift/src/class/oc_sdnvalidator.py | 58
-rw-r--r--  roles/lib_openshift/src/class/oc_secret.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_service.py | 6
-rw-r--r--  roles/lib_openshift/src/doc/atomic_container | 36
-rw-r--r--  roles/lib_openshift/src/doc/ca_server_cert | 96
-rw-r--r--  roles/lib_openshift/src/doc/registry | 192
-rw-r--r--  roles/lib_openshift/src/doc/router | 217
-rw-r--r--  roles/lib_openshift/src/doc/sdnvalidator | 27
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 64
-rw-r--r--  roles/lib_openshift/src/lib/deploymentconfig.py | 15
-rw-r--r--  roles/lib_openshift/src/lib/import.py | 7
-rw-r--r--  roles/lib_openshift/src/lib/replicationcontroller.py | 7
-rw-r--r--  roles/lib_openshift/src/lib/rolebinding.py | 289
-rw-r--r--  roles/lib_openshift/src/lib/secret.py | 5
-rw-r--r--  roles/lib_openshift/src/lib/service.py | 5
-rw-r--r--  roles/lib_openshift/src/lib/serviceaccount.py | 2
-rw-r--r--  roles/lib_openshift/src/lib/volume.py | 38
-rw-r--r--  roles/lib_openshift/src/sources.yml | 56
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oc_sdnvalidator.py | 481
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oc_secret.py | 96
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oc_serviceaccount.py | 119
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oc_version.py | 73
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oadm_manage_node.py (renamed from roles/lib_openshift/src/test/unit/oadm_manage_node.py) | 111
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_env.py (renamed from roles/lib_openshift/src/test/unit/oc_env.py) | 138
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_label.py (renamed from roles/lib_openshift/src/test/unit/oc_label.py) | 111
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_process.py (renamed from roles/lib_openshift/src/test/unit/oc_process.py) | 111
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_route.py (renamed from roles/lib_openshift/src/test/unit/oc_route.py) | 143
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_scale.py (renamed from roles/lib_openshift/src/test/unit/oc_scale.py) | 111
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_secret.py | 210
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_service.py (renamed from roles/lib_openshift/src/test/unit/oc_service.py) | 111
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py | 233
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py (renamed from roles/lib_openshift/src/test/unit/oc_serviceaccount_secret.py) | 233
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_version.py | 182
-rw-r--r--  roles/lib_openshift/tasks/main.yml | 8
-rw-r--r--  roles/lib_utils/library/repoquery.py | 10
-rw-r--r--  roles/lib_utils/library/yedit.py | 92
-rw-r--r--  roles/lib_utils/meta/main.yml | 15
-rw-r--r--  roles/lib_utils/src/class/repoquery.py | 2
-rw-r--r--  roles/lib_utils/src/class/yedit.py | 85
-rw-r--r--  roles/lib_utils/src/lib/import.py | 8
-rwxr-xr-x  roles/lib_utils/src/test/unit/test_repoquery.py (renamed from roles/lib_utils/src/test/unit/repoquery.py) | 0
-rwxr-xr-x  roles/lib_utils/src/test/unit/test_yedit.py (renamed from roles/lib_utils/src/test/unit/yedit_test.py) | 2
-rw-r--r--  roles/lib_utils/tasks/main.yml | 6
-rw-r--r--  roles/nuage_node/tasks/iptables.yml | 2
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 1
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 204
-rw-r--r--  roles/openshift_certificate_expiry/test/master.server.crt | 42
-rw-r--r--  roles/openshift_certificate_expiry/test/master.server.crt.txt | 82
-rw-r--r--  roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt | 19
-rw-r--r--  roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt.txt | 75
-rw-r--r--  roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py | 82
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 17
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json | 24
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-amq-template.json | 362
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-log-template.json | 336
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-rest-sql-template.json | 421
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-cxf-rest-template.json | 385
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/openjdk18-web-basic-s2i.json | 267
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-amq-template.json | 331
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-config-template.json | 327
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-drools-template.json | 334
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-infinispan-template.json | 315
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-rest-sql-template.json | 403
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-teiid-template.json | 343
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-xml-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxrs-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxws-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql-persistent.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql-persistent.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-streams/fis-image-streams.json | 24
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json | 25
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-amq-template.json | 362
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-log-template.json | 336
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-rest-sql-template.json | 421
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-cxf-rest-template.json | 385
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/openjdk18-web-basic-s2i.json | 267
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-amq-template.json | 331
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-config-template.json | 327
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-drools-template.json | 334
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-infinispan-template.json | 315
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-rest-sql-template.json | 403
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-teiid-template.json | 343
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-xml-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxrs-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxws-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-streams/fis-image-streams.json | 24
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json | 25
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-amq-template.json | 362
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-log-template.json | 336
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-rest-sql-template.json | 421
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-cxf-rest-template.json | 385
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/openjdk18-web-basic-s2i.json | 267
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-amq-template.json | 331
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-config-template.json | 327
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-drools-template.json | 334
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-infinispan-template.json | 315
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-rest-sql-template.json | 403
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-teiid-template.json | 343
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-xml-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxrs-template.json | 305
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxws-template.json | 305
-rw-r--r--  roles/openshift_excluder/tasks/status.yml | 2
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 4
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 114
-rw-r--r--  roles/openshift_health_checker/HOWTO_CHECKS.md | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 1
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 9
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_update.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 4
-rw-r--r--  roles/openshift_hosted/filter_plugins/filters.py | 35
-rw-r--r--  roles/openshift_hosted/handlers/main.yml | 0
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 3
-rw-r--r--  roles/openshift_hosted/tasks/main.yml | 19
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 143
-rw-r--r--  roles/openshift_hosted/tasks/registry/secure.yml | 177
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/object_storage.yml | 123
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml | 26
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/s3.yml | 70
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 151
-rw-r--r--  roles/openshift_hosted/templates/registry_config.j2 | 2
-rw-r--r--  roles/openshift_hosted_logging/handlers/main.yml | 21
-rw-r--r--  roles/openshift_logging/README.md | 7
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 11
-rw-r--r--  roles/openshift_logging/files/fluent.conf | 2
-rw-r--r--  roles/openshift_logging/handlers/main.yml | 21
-rw-r--r--  roles/openshift_logging/tasks/generate_routes.yaml | 1
-rw-r--r--  roles/openshift_logging/tasks/generate_secrets.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/start_cluster.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/stop_cluster.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/update_master_config.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/upgrade_logging.yaml | 6
-rw-r--r--  roles/openshift_logging/templates/curator.j2 | 4
-rw-r--r--  roles/openshift_logging/templates/elasticsearch.yml.j2 | 7
-rw-r--r--  roles/openshift_logging/templates/es.j2 | 2
-rw-r--r--  roles/openshift_logging/templates/kibana.j2 | 2
-rw-r--r--  roles/openshift_logging/templates/route_reencrypt.j2 | 3
-rw-r--r--  roles/openshift_logging/templates/secret.j2 | 4
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 79
-rw-r--r--  roles/openshift_master/vars/main.yml | 1
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 2
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py | 20
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py | 18
-rw-r--r--  roles/openshift_master_facts/test/conftest.py | 54
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py | 57
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py | 265
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py | 263
-rwxr-xr-x  roles/openshift_metrics/files/import_jks_certs.sh | 19
-rw-r--r--  roles/openshift_metrics/tasks/generate_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 14
-rw-r--r--  roles/openshift_metrics/tasks/import_jks_certs.yaml | 11
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 13
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 38
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 38
-rw-r--r--  roles/openshift_node/vars/main.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/README.md | 2
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 37
-rw-r--r--  roles/openshift_repos/meta/main.yml | 3
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 79
238 files changed, 32197 insertions(+), 2096 deletions(-)
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 241180e2c..3b80164cc 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,17 +1,16 @@
---
+- name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
-- name: Check etcd system container package
- command: >
- atomic containers list --no-trunc -a -f container=etcd
- register: result
-
- name: Set initial Etcd cluster
set_fact:
- etcd_initial_cluster: >
+ etcd_initial_cluster: >-
{% for host in etcd_peers | default([]) -%}
{% if loop.last -%}
{{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }}
@@ -20,44 +19,23 @@
{%- endif -%}
{% endfor -%}
-- name: Update Etcd system container package
- command: >
- atomic containers update
- --set ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
- --set ETCD_NAME={{ etcd_hostname }}
- --set ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster | replace('\n', '') }}
- --set ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
- --set ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
- --set ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
- --set ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
- --set ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
- --set ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- --set ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt
- --set ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key
- --set ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- --set ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
- --set ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
- etcd
- when:
- - ("etcd" in result.stdout)
-
-- name: Install Etcd system container package
- command: >
- atomic install --system --name=etcd
- --set ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
- --set ETCD_NAME={{ etcd_hostname }}
- --set ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster | replace('\n', '') }}
- --set ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
- --set ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
- --set ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
- --set ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
- --set ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
- --set ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- --set ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt
- --set ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key
- --set ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- --set ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
- --set ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
- {{ openshift.etcd.etcd_image }}
- when:
- - ("etcd" not in result.stdout)
+- name: Install or Update Etcd system container package
+ oc_atomic_container:
+ name: etcd
+ image: "{{ openshift.etcd.etcd_image }}"
+ state: latest
+ values:
+ - ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
+ - ETCD_NAME={{ etcd_hostname }}
+ - ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }}
+ - ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
+ - ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
+ - ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
+ - ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
+ - ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
+ - ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
+ - ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt
+ - ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key
+ - ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
+ - ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
+ - ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
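The hunk above collapses the removed check/update/install trio of raw `atomic` commands into one declarative `oc_atomic_container` task. For orientation only, here is a minimal Python sketch of the install-or-update decision those removed shell tasks performed by hand; it mirrors the removed `atomic containers list` check and is not the actual oc_atomic_container implementation.

    import subprocess

    def ensure_system_container(name, image, values):
        '''Install the system container if absent, otherwise update it in place.'''
        # Mirrors the removed "Check etcd system container package" task.
        listing = subprocess.check_output(
            ['atomic', 'containers', 'list', '--no-trunc', '-a',
             '-f', 'container={0}'.format(name)]).decode()

        set_args = []
        for val in values:  # e.g. 'ETCD_NAME=etcd0'
            set_args.extend(['--set', val])

        if name in listing:  # removed "Update Etcd system container package" task
            cmd = ['atomic', 'containers', 'update'] + set_args + [name]
        else:                # removed "Install Etcd system container package" task
            cmd = ['atomic', 'install', '--system',
                   '--name={0}'.format(name)] + set_args + [image]

        return subprocess.call(cmd)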
diff --git a/roles/lib_openshift/library/oadm_manage_node.py b/roles/lib_openshift/library/oadm_manage_node.py
index 6c0ff9b13..0c29c1b91 100644
--- a/roles/lib_openshift/library/oadm_manage_node.py
+++ b/roles/lib_openshift/library/oadm_manage_node.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -136,6 +141,7 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/manage_node -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -330,11 +336,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -374,10 +386,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -406,14 +432,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -481,7 +509,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -494,7 +524,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -503,7 +534,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -542,12 +574,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -560,11 +600,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -720,6 +769,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -733,6 +808,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -929,24 +1005,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -954,7 +1029,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -1006,7 +1084,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1088,7 +1172,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1194,8 +1283,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1215,8 +1304,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
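The recurring change across this module is capability probing rather than version checks: attempt the ruamel.yaml round-trip API and fall back to plain PyYAML when an AttributeError shows the attribute is missing; the same probe drives locate_oc_binary's shutil.which fallback (shutil.which exists only on Python 3). A condensed, self-contained sketch of that pattern follows; dump_yaml is a hypothetical helper name, not part of the module.

    try:
        import ruamel.yaml as yaml   # preferred: round-trip, comment-preserving API
    except ImportError:
        import yaml                  # plain PyYAML fallback

    def dump_yaml(data):
        '''Serialize with RoundTripDumper when ruamel.yaml provides it.'''
        try:
            # ruamel.yaml path: yaml.RoundTripDumper exists
            return yaml.dump(data, Dumper=yaml.RoundTripDumper)
        except AttributeError:
            # PyYAML path: no RoundTripDumper attribute, use safe_dump
            return yaml.safe_dump(data, default_flow_style=False)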
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
new file mode 100644
index 000000000..9390ea7c1
--- /dev/null
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -0,0 +1,1544 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# =====================================
+#     GENERATED  --  DO  NOT  EDIT
+# =====================================
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/ca_server_cert -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_adm_ca_server_cert
+short_description: Module to run openshift oc adm ca create-server-cert
+description:
+ - Wrapper around the openshift `oc adm ca create-server-cert` command.
+options:
+ state:
+ description:
+ - Present is the only supported state. With state present, `oc adm ca` generates a certificate
+ - and verifies that the hostnames and the ClusterIP exist in the certificate.
+ - When create-server-cert is desired, the following parameters are passed:
+ - ['cert', 'key', 'signer_cert', 'signer_key', 'signer_serial']
+ required: false
+ default: present
+ choices:
+ - present
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ cert:
+ description:
+ - The certificate file. Choose a name that indicates what the service is.
+ required: false
+ default: None
+ aliases: []
+ key:
+ description:
+ - The key file. Choose a name that indicates what the service is.
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Force updating of the existing cert and key files
+ required: false
+ default: False
+ aliases: []
+ signer_cert:
+ description:
+ - The signer certificate file.
+ required: false
+ default: /etc/origin/master/ca.crt
+ aliases: []
+ signer_key:
+ description:
+ - The signer key file.
+ required: false
+ default: /etc/origin/master/ca.key
+ aliases: []
+ signer_serial:
+ description:
+ - The signer serial file.
+ required: false
+ default: /etc/origin/master/ca.serial.txt
+ aliases: []
+ hostnames:
+ description:
+ - Every hostname or IP that server certs should be valid for
+ required: false
+ default: []
+ aliases: []
+ backup:
+ description:
+ - Whether to back up the cert and key files before writing them.
+ required: false
+ default: True
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Create a self-signed cert
+ oc_adm_ca_server_cert:
+ signer_cert: /etc/origin/master/ca.crt
+ signer_key: /etc/origin/master/ca.key
+ signer_serial: /etc/origin/master/ca.serial.txt
+ hostnames: "registry.test.openshift.com,127.0.0.1,docker-registry.default.svc.cluster.local"
+ cert: /etc/origin/master/registry.crt
+ key: /etc/origin/master/registry.key
+'''
+
+# -*- -*- -*- End included fragment: doc/ca_server_cert -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+ 'file exists, that it has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
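The Yedit helpers above address nested data with dotted keys for dict segments and bracketed indices for list segments (see re_key). A minimal illustrative use of the static helpers, assuming the class exactly as defined above:

    # Hypothetical usage sketch: dotted segments walk dicts,
    # bracketed indices walk lists.
    data = {'spec': {'containers': [{'name': 'etcd'}]}}
    Yedit.add_entry(data, 'spec.containers[0].image', 'etcd:3.1', sep='.')
    assert Yedit.get_entry(data, 'spec.containers[0].image') == 'etcd:3.1'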
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
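+    # Illustrative sketch of the normalized return shape (values are
+    # hypothetical, not from the original fragment): _get('dc', 'router')
+    # yields something like
+    #   {'returncode': 0,
+    #    'cmd': 'oc get dc router -o json -n default',
+    #    'results': [{'kind': 'DeploymentConfig',
+    #                 'metadata': {'name': 'router'}}]}
+    # so callers can always iterate rval['results'] as a list.
+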
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+                    except ValueError as verr:
+                        if "No JSON object could be decoded" in verr.args:
+                            err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
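+# Illustrative usage sketch, kept as comments so the module's import-time
+# behavior is unchanged (paths and values are hypothetical):
+#
+#   cli = OpenShiftCLI('default', kubeconfig='/etc/origin/master/admin.kubeconfig')
+#   rval = cli.openshift_cmd(['get', 'pods', '-o', 'json'], output=True)
+#   if rval['returncode'] == 0:
+#       pod_names = [p['metadata']['name'] for p in rval['results']['items']]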
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+        # horrible hack to get the openshift version in OpenShift 3.2;
+        # by default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
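+    # Illustrative sketch: given typical `oc version` output such as
+    #
+    #   oc v3.3.0.33
+    #   kubernetes v1.3.0+52492b4
+    #
+    # filter_versions() would return (openshift copied from oc by the
+    # 3.2 fallback above):
+    #   {'oc': 'v3.3.0.33',
+    #    'kubernetes': 'v1.3.0+52492b4',
+    #    'openshift': 'v3.3.0.33'}
+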
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
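+    # Illustrative sketch: add_custom_versions({'oc': 'v3.3.0.33'})
+    # returns {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'} -- any '+build'
+    # suffix is dropped from the numeric form, and the short form is simply
+    # version[1:4].
+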
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+                        if not result:
+                            if debug:
+                                print('list compare returned false')
+                            return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
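+# Illustrative sketch of Utils.check_def_equal (hypothetical values):
+#
+#   user = {'spec': {'replicas': 2}}
+#   api  = {'spec': {'replicas': 2}, 'metadata': {'name': 'x'}, 'status': {}}
+#   Utils.check_def_equal(user, api)  # -> True; 'metadata'/'status' skipped
+#
+#   api['spec']['replicas'] = 3
+#   Utils.check_def_equal(user, api)  # -> False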
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+        '''return all options as a list of strings'''
+ return self.stringify()
+
+ def stringify(self):
+        ''' return the options hash as cli params in a list '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
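+# Illustrative sketch (hypothetical values): given an options hash like
+#   {'cert': {'value': '/etc/origin/master/registry.crt', 'include': True},
+#    'backup': {'value': True, 'include': False}}
+# stringify() yields ['--cert=/etc/origin/master/registry.crt']; entries
+# with include=False (or an empty, non-int value) are omitted.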
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_adm_ca_server_cert.py -*- -*- -*-
+
+class CAServerCertConfig(OpenShiftCLIConfig):
+ ''' CAServerCertConfig is a DTO for the oc adm ca command '''
+ def __init__(self, kubeconfig, verbose, ca_options):
+ super(CAServerCertConfig, self).__init__('ca', None, kubeconfig, ca_options)
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+ self._ca = ca_options
+
+
+class CAServerCert(OpenShiftCLI):
+ ''' Class to wrap the oc adm ca create-server-cert command line'''
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for oadm ca '''
+ super(CAServerCert, self).__init__(None, config.kubeconfig, verbose)
+ self.config = config
+ self.verbose = verbose
+
+ def get(self):
+ '''get the current cert file
+
+ If a file exists by the same name in the specified location then the cert exists
+ '''
+ cert = self.config.config_options['cert']['value']
+ if cert and os.path.exists(cert):
+ return open(cert).read()
+
+ return None
+
+ def create(self):
+ '''run openshift oc adm ca create-server-cert cmd'''
+
+        # Added this here as a safeguard against stomping on the
+ # cert and key files if they exist
+ if self.config.config_options['backup']['value']:
+ import time
+ ext = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
+            date_str = "%s_" + ext
+
+ if os.path.exists(self.config.config_options['key']['value']):
+ shutil.copy(self.config.config_options['key']['value'],
+ date_str % self.config.config_options['key']['value'])
+ if os.path.exists(self.config.config_options['cert']['value']):
+ shutil.copy(self.config.config_options['cert']['value'],
+ date_str % self.config.config_options['cert']['value'])
+
+ options = self.config.to_option_list()
+
+ cmd = ['ca', 'create-server-cert']
+ cmd.extend(options)
+
+ return self.openshift_cmd(cmd, oadm=True)
+
+ def exists(self):
+ ''' check whether the certificate exists and has the clusterIP '''
+
+ cert_path = self.config.config_options['cert']['value']
+ if not os.path.exists(cert_path):
+ return False
+
+        # Would prefer pyopenssl, but it is not installed everywhere.
+        # Once we can verify it is available, switch this code to use it.
+        # Here is the openssl command to get the subject and the SAN:
+ # openssl x509 -text -noout -certopt \
+ # no_header,no_version,no_serial,no_signame,no_validity,no_issuer,no_pubkey,no_sigdump,no_aux \
+ # -in /etc/origin/master/registry.crt
+ # Instead of this solution we will use a regex.
+ cert_names = []
+ hostnames = self.config.config_options['hostnames']['value'].split(',')
+ proc = subprocess.Popen(['openssl', 'x509', '-noout', '-text', '-in', cert_path],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ x509output, _ = proc.communicate()
+ if proc.returncode == 0:
+ regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
+            match = regex.search(x509output.decode('utf-8'))  # E501
+ for entry in re.split(r", *", match.group(1)):
+ if entry.startswith('DNS') or entry.startswith('IP Address'):
+ cert_names.append(entry.split(':')[1])
+ # now that we have cert names let's compare
+ cert_set = set(cert_names)
+ hname_set = set(hostnames)
+ if cert_set.issubset(hname_set) and hname_set.issubset(cert_set):
+ return True
+
+ return False
+
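+    # Illustrative sketch (hypothetical values): for a SAN line such as
+    #   'DNS:docker-registry.default.svc.cluster.local, IP Address:172.30.12.34'
+    # the loop above collects
+    #   cert_names == ['docker-registry.default.svc.cluster.local', '172.30.12.34']
+    # and exists() returns True only when that set exactly matches the
+    # configured hostnames.
+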
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ config = CAServerCertConfig(params['kubeconfig'],
+ params['debug'],
+ {'cert': {'value': params['cert'], 'include': True},
+ 'hostnames': {'value': ','.join(params['hostnames']), 'include': True},
+ 'overwrite': {'value': True, 'include': True},
+ 'key': {'value': params['key'], 'include': True},
+ 'signer_cert': {'value': params['signer_cert'], 'include': True},
+ 'signer_key': {'value': params['signer_key'], 'include': True},
+ 'signer_serial': {'value': params['signer_serial'], 'include': True},
+ 'backup': {'value': params['backup'], 'include': False},
+ })
+
+ server_cert = CAServerCert(config)
+
+ state = params['state']
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not server_cert.exists() or params['force']:
+
+ if check_mode:
+ return {'changed': True,
+ 'msg': "CHECK_MODE: Would have created the certificate.",
+ 'state': state}
+
+ api_rval = server_cert.create()
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Exists
+ ########
+ api_rval = server_cert.get()
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True,
+ 'msg': 'Unknown state passed. %s' % state}
+
+
+# -*- -*- -*- End included fragment: class/oc_adm_ca_server_cert.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_adm_ca_server_cert.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc adm module for ca create-server-cert
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str', choices=['present']),
+ debug=dict(default=False, type='bool'),
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ backup=dict(default=True, type='bool'),
+ force=dict(default=False, type='bool'),
+ # oc adm ca create-server-cert [options]
+ cert=dict(default=None, type='str'),
+ key=dict(default=None, type='str'),
+ signer_cert=dict(default='/etc/origin/master/ca.crt', type='str'),
+ signer_key=dict(default='/etc/origin/master/ca.key', type='str'),
+ signer_serial=dict(default='/etc/origin/master/ca.serial.txt', type='str'),
+ hostnames=dict(default=[], type='list'),
+ ),
+ supports_check_mode=True,
+ )
+
+ results = CAServerCert.run_ansible(module.params, module.check_mode)
+ if 'failed' in results:
+ return module.fail_json(**results)
+
+ return module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_adm_ca_server_cert.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
new file mode 100644
index 000000000..62018d758
--- /dev/null
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -0,0 +1,2612 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/registry -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_adm_registry
+short_description: Module to manage openshift registry
+description:
+ - Manage openshift registry programmatically.
+options:
+ state:
+ description:
+ - The desired action when managing openshift registry
+ - present - update or create the registry
+ - absent - tear down the registry service and deploymentconfig
+      - list - returns the current representation of a registry
+ required: false
+    default: present
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - The name of the registry
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+    - The namespace where the registry is deployed.
+ required: false
+ default: None
+ aliases: []
+ images:
+ description:
+ - The image to base this registry on - ${component} will be replaced with --type
+    required: false
+    default: 'openshift3/ose-${component}:${version}'
+ aliases: []
+ latest_images:
+ description:
+ - If true, attempt to use the latest image for the registry instead of the latest release.
+ required: false
+ default: False
+ aliases: []
+ labels:
+ description:
+ - A set of labels to uniquely identify the registry and its components.
+ required: false
+ default: None
+ aliases: []
+ enforce_quota:
+ description:
+ - If set, the registry will refuse to write blobs if they exceed quota limits
+ required: False
+ default: False
+ aliases: []
+ mount_host:
+ description:
+ - If set, the registry volume will be created as a host-mount at this path.
+ required: False
+ default: False
+ aliases: []
+ ports:
+ description:
+    - A comma-delimited list of ports or port pairs to expose on the registry pod. The default is 5000.
+ required: False
+ default: [5000]
+ aliases: []
+ replicas:
+ description:
+ - The replication factor of the registry; commonly 2 when high availability is desired.
+ required: False
+ default: 1
+ aliases: []
+ selector:
+ description:
+ - Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes.
+ required: False
+ default: None
+ aliases: []
+ service_account:
+ description:
+ - Name of the service account to use to run the registry pod.
+ required: False
+ default: 'registry'
+ aliases: []
+ tls_certificate:
+ description:
+ - An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS
+ required: false
+ default: None
+ aliases: []
+ tls_key:
+ description:
+ - An optional path to a PEM encoded private key for serving over TLS
+ required: false
+ default: None
+ aliases: []
+ volume_mounts:
+ description:
+ - The volume mounts for the registry.
+ required: false
+ default: None
+ aliases: []
+ daemonset:
+ description:
+ - Use a daemonset instead of a deployment config.
+ required: false
+ default: False
+ aliases: []
+ edits:
+ description:
+ - A list of modifications to make on the deploymentconfig
+ required: false
+ default: None
+ aliases: []
+ env_vars:
+ description:
+ - A dictionary of modifications to make on the deploymentconfig. e.g. FOO: BAR
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Force a registry update.
+ required: false
+ default: False
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create a secure registry
+ oc_adm_registry:
+ name: docker-registry
+ service_account: registry
+ replicas: 2
+ namespace: default
+ selector: type=infra
+ images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}"
+ env_vars:
+ REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml
+ REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
+ REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
+ REGISTRY_HTTP_SECRET: supersecret
+ volume_mounts:
+ - path: /etc/secrets
+ name: dockercerts
+ type: secret
+ secret_name: registry-secret
+ - path: /etc/registryconfig
+ name: dockersecrets
+ type: secret
+ secret_name: docker-registry-config
+ edits:
+ - key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
+ value: HTTPS
+ action: put
+ - key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
+ value: HTTPS
+ action: put
+ - key: spec.strategy.rollingParams
+ value:
+ intervalSeconds: 1
+ maxSurge: 50%
+ maxUnavailable: 50%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ action: put
+ - key: spec.template.spec.containers[0].resources.limits.memory
+ value: 2G
+ action: update
+ - key: spec.template.spec.containers[0].resources.requests.memory
+ value: 1G
+ action: update
+
+ register: registryout
+
+'''
+
+# -*- -*- -*- End included fragment: doc/registry -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+        ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
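+    # Illustrative sketch (default '.' separator):
+    #   Yedit.parse_key('a.b[0].c')
+    #   -> [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]
+    # i.e. each tuple is (array_index, dict_key), with the unused slot empty.
+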
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
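+    # Illustrative sketch: add_entry mutates the passed-in structure,
+    # e.g. (hypothetical data):
+    #   d = {'a': {'b': {}}}
+    #   Yedit.add_entry(d, 'a.b.c', 'x')
+    #   # d is now {'a': {'b': {'c': 'x'}}}
+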
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
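+    # Illustrative sketch:
+    #   Yedit.get_entry({'a': {'b': 'c'}}, 'a.b')      # -> 'c'
+    #   Yedit.get_entry({'a': [{'b': 1}]}, 'a[0].b')   # -> 1
+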
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
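+    # Illustrative sketch (hypothetical values):
+    #   Yedit.parse_value('yes', vtype='bool')   # -> True (via yaml.load)
+    #   Yedit.parse_value('1.5')                 # -> 1.5
+    #   Yedit.parse_value('1.5', vtype='str')    # -> '1.5' (left as a string)
+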
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+                    'msg': 'Error opening file [%s]. Verify that the '
+                           'file exists, that it has correct '
+                           'permissions, and is valid yaml.' % module.params['src']}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+    return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+        ''' Constructor for OpenShiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+                    except ValueError as verr:
+                        if "No JSON object could be decoded" in verr.args:
+                            err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+        # horrible hack to get the openshift version in OpenShift 3.2;
+        # by default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+                        if not result:
+                            if debug:
+                                print('list compare returned false')
+                            return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+        '''return all options as a list of cli params'''
+ return self.stringify()
+
+ def stringify(self):
+        ''' return the options hash as a list of cli params '''
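+        # Example (illustrative): {'service_account': {'value': 'registry',
+        # 'include': True}} yields ['--service-account=registry']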
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
+
+
+# pylint: disable=too-many-public-methods
+class DeploymentConfig(Yedit):
+ ''' Class to model an openshift DeploymentConfig'''
+ default_deployment_config = '''
+apiVersion: v1
+kind: DeploymentConfig
+metadata:
+ name: default_dc
+ namespace: default
+spec:
+ replicas: 0
+ selector:
+ default_dc: default_dc
+ strategy:
+ resources: {}
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 0
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePercent: -25
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ spec:
+ containers:
+ - env:
+ - name: default
+ value: default
+ image: default
+ imagePullPolicy: IfNotPresent
+ name: default_dc
+ ports:
+ - containerPort: 8000
+ hostPort: 8000
+ protocol: TCP
+ name: default_port
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ hostNetwork: true
+ nodeSelector:
+ type: compute
+ restartPolicy: Always
+ securityContext: {}
+ serviceAccount: default
+ serviceAccountName: default
+ terminationGracePeriodSeconds: 30
+ triggers:
+ - type: ConfigChange
+'''
+
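+    # Yedit dotted-path locations of the fields this class edits;
+    # [0] selects the first container in the pod template.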
+ replicas_path = "spec.replicas"
+ env_path = "spec.template.spec.containers[0].env"
+ volumes_path = "spec.template.spec.volumes"
+ container_path = "spec.template.spec.containers"
+ volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
+
+ def __init__(self, content=None):
+ ''' Constructor for deploymentconfig '''
+ if not content:
+ content = DeploymentConfig.default_deployment_config
+
+ super(DeploymentConfig, self).__init__(content=content)
+
+ def add_env_value(self, key, value):
+ ''' add key, value pair to env array '''
+ rval = False
+ env = self.get_env_vars()
+ if env:
+ env.append({'name': key, 'value': value})
+ rval = True
+ else:
+ result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
+ rval = result[0]
+
+ return rval
+
+ def exists_env_value(self, key, value):
+ ''' return whether a key, value pair exists '''
+ results = self.get_env_vars()
+ if not results:
+ return False
+
+ for result in results:
+ if result['name'] == key and result['value'] == value:
+ return True
+
+ return False
+
+ def exists_env_key(self, key):
+        ''' return whether a key exists '''
+ results = self.get_env_vars()
+ if not results:
+ return False
+
+ for result in results:
+ if result['name'] == key:
+ return True
+
+ return False
+
+ def get_env_var(self, key):
+        '''return an environment variable '''
+ results = self.get(DeploymentConfig.env_path) or []
+ if not results:
+ return None
+
+ for env_var in results:
+ if env_var['name'] == key:
+ return env_var
+
+ return None
+
+ def get_env_vars(self):
+        '''return the environment variables '''
+ return self.get(DeploymentConfig.env_path) or []
+
+ def delete_env_var(self, keys):
+ '''delete a list of keys '''
+ if not isinstance(keys, list):
+ keys = [keys]
+
+ env_vars_array = self.get_env_vars()
+        modified = False
+        for key in keys:
+            idx = None
+            for env_idx, env_var in enumerate(env_vars_array):
+                if env_var['name'] == key:
+                    idx = env_idx
+                    break
+
+            # idx can legitimately be 0, so compare against None explicitly
+            if idx is not None:
+                modified = True
+                del env_vars_array[idx]
+
+        return modified
+
+ def update_env_var(self, key, value):
+ '''place an env in the env var list'''
+
+ env_vars_array = self.get_env_vars()
+ idx = None
+ for env_idx, env_var in enumerate(env_vars_array):
+ if env_var['name'] == key:
+ idx = env_idx
+ break
+
+        if idx is not None:
+ env_vars_array[idx]['value'] = value
+ else:
+ self.add_env_value(key, value)
+
+ return True
+
+ def exists_volume_mount(self, volume_mount):
+ ''' return whether a volume mount exists '''
+ exist_volume_mounts = self.get_volume_mounts()
+
+ if not exist_volume_mounts:
+ return False
+
+ volume_mount_found = False
+ for exist_volume_mount in exist_volume_mounts:
+ if exist_volume_mount['name'] == volume_mount['name']:
+ volume_mount_found = True
+ break
+
+ return volume_mount_found
+
+ def exists_volume(self, volume):
+ ''' return whether a volume exists '''
+ exist_volumes = self.get_volumes()
+
+ volume_found = False
+ for exist_volume in exist_volumes:
+ if exist_volume['name'] == volume['name']:
+ volume_found = True
+ break
+
+ return volume_found
+
+ def find_volume_by_name(self, volume, mounts=False):
+ ''' return the index of a volume '''
+ volumes = []
+ if mounts:
+ volumes = self.get_volume_mounts()
+ else:
+ volumes = self.get_volumes()
+ for exist_volume in volumes:
+ if exist_volume['name'] == volume['name']:
+ return exist_volume
+
+ return None
+
+ def get_replicas(self):
+ ''' return replicas setting '''
+ return self.get(DeploymentConfig.replicas_path)
+
+ def get_volume_mounts(self):
+ '''return volume mount information '''
+ return self.get_volumes(mounts=True)
+
+ def get_volumes(self, mounts=False):
+        '''return volume or volume mount information '''
+ if mounts:
+ return self.get(DeploymentConfig.volume_mounts_path) or []
+
+ return self.get(DeploymentConfig.volumes_path) or []
+
+ def delete_volume_by_name(self, volume):
+ '''delete a volume '''
+ modified = False
+ exist_volume_mounts = self.get_volume_mounts()
+ exist_volumes = self.get_volumes()
+ del_idx = None
+ for idx, exist_volume in enumerate(exist_volumes):
+ if 'name' in exist_volume and exist_volume['name'] == volume['name']:
+ del_idx = idx
+ break
+
+        if del_idx is not None:
+ del exist_volumes[del_idx]
+ modified = True
+
+ del_idx = None
+ for idx, exist_volume_mount in enumerate(exist_volume_mounts):
+ if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
+ del_idx = idx
+ break
+
+        if del_idx is not None:
+            del exist_volume_mounts[del_idx]
+ modified = True
+
+ return modified
+
+ def add_volume_mount(self, volume_mount):
+ ''' add a volume or volume mount to the proper location '''
+ exist_volume_mounts = self.get_volume_mounts()
+
+ if not exist_volume_mounts and volume_mount:
+ self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
+ else:
+ exist_volume_mounts.append(volume_mount)
+
+ def add_volume(self, volume):
+ ''' add a volume or volume mount to the proper location '''
+ exist_volumes = self.get_volumes()
+ if not volume:
+ return
+
+ if not exist_volumes:
+ self.put(DeploymentConfig.volumes_path, [volume])
+ else:
+ exist_volumes.append(volume)
+
+ def update_replicas(self, replicas):
+ ''' update replicas value '''
+ self.put(DeploymentConfig.replicas_path, replicas)
+
+ def update_volume(self, volume):
+        '''place a volume in the volumes list'''
+ exist_volumes = self.get_volumes()
+
+ if not volume:
+ return False
+
+ # update the volume
+ update_idx = None
+ for idx, exist_vol in enumerate(exist_volumes):
+ if exist_vol['name'] == volume['name']:
+ update_idx = idx
+ break
+
+        if update_idx is not None:
+ exist_volumes[update_idx] = volume
+ else:
+ self.add_volume(volume)
+
+ return True
+
+ def update_volume_mount(self, volume_mount):
+        '''place a volume mount in the volume mounts list'''
+ modified = False
+
+ exist_volume_mounts = self.get_volume_mounts()
+
+ if not volume_mount:
+ return False
+
+ # update the volume mount
+ for exist_vol_mount in exist_volume_mounts:
+ if exist_vol_mount['name'] == volume_mount['name']:
+ if 'mountPath' in exist_vol_mount and \
+ str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
+ exist_vol_mount['mountPath'] = volume_mount['mountPath']
+ modified = True
+ break
+
+ if not modified:
+ self.add_volume_mount(volume_mount)
+ modified = True
+
+ return modified
+
+ def needs_update_volume(self, volume, volume_mount):
+ ''' verify a volume update is needed '''
+ exist_volume = self.find_volume_by_name(volume)
+ exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
+ results = []
+ results.append(exist_volume['name'] == volume['name'])
+
+ if 'secret' in volume:
+            results.append('secret' in exist_volume)
+            if results[-1]:
+                results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
+ results.append(exist_volume_mount['name'] == volume_mount['name'])
+ results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
+
+ elif 'emptyDir' in volume:
+ results.append(exist_volume_mount['name'] == volume['name'])
+ results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
+
+ elif 'persistentVolumeClaim' in volume:
+ pvc = 'persistentVolumeClaim'
+ results.append(pvc in exist_volume)
+ if results[-1]:
+ results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
+
+ if 'claimSize' in volume[pvc]:
+ results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
+
+        elif 'hostPath' in volume:
+ results.append('hostPath' in exist_volume)
+ results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
+
+ return not all(results)
+
+ def needs_update_replicas(self, replicas):
+ ''' verify whether a replica update is needed '''
+ current_reps = self.get(DeploymentConfig.replicas_path)
+        return current_reps != replicas
+
+# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class SecretConfig(object):
+ ''' Handle secret options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig,
+ secrets=None):
+ ''' constructor for handling secret options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.secrets = secrets
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' assign the correct properties for a secret dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Secret'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+ self.data['data'] = {}
+ if self.secrets:
+ for key, value in self.secrets.items():
+ self.data['data'][key] = value
+
+# pylint: disable=too-many-instance-attributes
+class Secret(Yedit):
+    ''' Class to model an openshift secret '''
+ secret_path = "data"
+ kind = 'secret'
+
+ def __init__(self, content):
+ '''secret constructor'''
+ super(Secret, self).__init__(content=content)
+ self._secrets = None
+
+ @property
+ def secrets(self):
+ '''secret property getter'''
+ if self._secrets is None:
+ self._secrets = self.get_secrets()
+ return self._secrets
+
+ @secrets.setter
+    def secrets(self, value):
+        '''secret property setter'''
+        self._secrets = value
+
+ def get_secrets(self):
+ ''' returns all of the defined secrets '''
+ return self.get(Secret.secret_path) or {}
+
+ def add_secret(self, key, value):
+ ''' add a secret '''
+ if self.secrets:
+ self.secrets[key] = value
+ else:
+ self.put(Secret.secret_path, {key: value})
+
+ return True
+
+ def delete_secret(self, key):
+ ''' delete secret'''
+ try:
+ del self.secrets[key]
+ except KeyError as _:
+ return False
+
+ return True
+
+ def find_secret(self, key):
+ ''' find secret'''
+ rval = None
+ try:
+ rval = self.secrets[key]
+ except KeyError as _:
+ return None
+
+ return {'key': key, 'value': rval}
+
+ def update_secret(self, key, value):
+ ''' update a secret'''
+ if key in self.secrets:
+ self.secrets[key] = value
+ else:
+ self.add_secret(key, value)
+
+ return True
+
+# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class ServiceConfig(object):
+ ''' Handle service options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ ports,
+ selector=None,
+ labels=None,
+ cluster_ip=None,
+ portal_ip=None,
+ session_affinity=None,
+ service_type=None):
+ ''' constructor for handling service options '''
+ self.name = sname
+ self.namespace = namespace
+ self.ports = ports
+ self.selector = selector
+ self.labels = labels
+ self.cluster_ip = cluster_ip
+ self.portal_ip = portal_ip
+ self.session_affinity = session_affinity
+ self.service_type = service_type
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' instantiates a service dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Service'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+        if self.labels:
+            self.data['metadata']['labels'] = {}
+            for lab, lab_value in self.labels.items():
+                self.data['metadata']['labels'][lab] = lab_value
+ self.data['spec'] = {}
+
+ if self.ports:
+ self.data['spec']['ports'] = self.ports
+ else:
+ self.data['spec']['ports'] = []
+
+ if self.selector:
+ self.data['spec']['selector'] = self.selector
+
+ self.data['spec']['sessionAffinity'] = self.session_affinity or 'None'
+
+ if self.cluster_ip:
+ self.data['spec']['clusterIP'] = self.cluster_ip
+
+ if self.portal_ip:
+ self.data['spec']['portalIP'] = self.portal_ip
+
+ if self.service_type:
+ self.data['spec']['type'] = self.service_type
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class Service(Yedit):
+ ''' Class to model the oc service object '''
+ port_path = "spec.ports"
+ portal_ip = "spec.portalIP"
+ cluster_ip = "spec.clusterIP"
+ selector_path = 'spec.selector'
+ kind = 'Service'
+
+ def __init__(self, content):
+ '''Service constructor'''
+ super(Service, self).__init__(content=content)
+
+ def get_ports(self):
+ ''' get a list of ports '''
+ return self.get(Service.port_path) or []
+
+ def get_selector(self):
+ ''' get the service selector'''
+ return self.get(Service.selector_path) or {}
+
+ def add_ports(self, inc_ports):
+ ''' add a port object to the ports list '''
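+        # Example (illustrative):
+        #   svc.add_ports({'port': 5000, 'targetPort': 5000, 'protocol': 'TCP'})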
+ if not isinstance(inc_ports, list):
+ inc_ports = [inc_ports]
+
+ ports = self.get_ports()
+ if not ports:
+ self.put(Service.port_path, inc_ports)
+ else:
+ ports.extend(inc_ports)
+
+ return True
+
+ def find_ports(self, inc_port):
+ ''' find a specific port '''
+ for port in self.get_ports():
+ if port['port'] == inc_port['port']:
+ return port
+
+ return None
+
+ def delete_ports(self, inc_ports):
+ ''' remove a port from a service '''
+ if not isinstance(inc_ports, list):
+ inc_ports = [inc_ports]
+
+ ports = self.get(Service.port_path) or []
+
+ if not ports:
+ return True
+
+ removed = False
+ for inc_port in inc_ports:
+ port = self.find_ports(inc_port)
+ if port:
+ ports.remove(port)
+ removed = True
+
+ return removed
+
+ def add_cluster_ip(self, sip):
+ '''add cluster ip'''
+ self.put(Service.cluster_ip, sip)
+
+ def add_portal_ip(self, pip):
+        '''add portal ip'''
+ self.put(Service.portal_ip, pip)
+
+# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
+
+class Volume(object):
+ ''' Class to model an openshift volume object'''
+ volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
+ "dc": "spec.template.spec.containers[0].volumeMounts",
+ "rc": "spec.template.spec.containers[0].volumeMounts",
+ }
+ volumes_path = {"pod": "spec.volumes",
+ "dc": "spec.template.spec.volumes",
+ "rc": "spec.template.spec.volumes",
+ }
+
+ @staticmethod
+ def create_volume_structure(volume_info):
+ ''' return a properly structured volume '''
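+        # Example (illustrative): a volume_info of
+        #   {'name': 'registry-certs', 'type': 'secret',
+        #    'secret_name': 'registry-certs', 'path': '/etc/secrets'}
+        # yields a secret volume plus a matching volume mount;
+        # the 'pvc' type yields (volume, None) since no mount path applies here.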
+ volume_mount = None
+ volume = {'name': volume_info['name']}
+ volume_type = volume_info['type'].lower()
+ if volume_type == 'secret':
+            volume['secret'] = {'secretName': volume_info['secret_name']}
+ volume_mount = {'mountPath': volume_info['path'],
+ 'name': volume_info['name']}
+ elif volume_type == 'emptydir':
+ volume['emptyDir'] = {}
+ volume_mount = {'mountPath': volume_info['path'],
+ 'name': volume_info['name']}
+ elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
+ volume['persistentVolumeClaim'] = {}
+ volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
+ volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
+ elif volume_type == 'hostpath':
+ volume['hostPath'] = {}
+ volume['hostPath']['path'] = volume_info['path']
+
+ return (volume, volume_mount)
+
+# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class OCVersion(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ debug):
+ ''' Constructor for OCVersion '''
+ super(OCVersion, self).__init__(None, config)
+ self.debug = debug
+
+ def get(self):
+ '''get and return version information '''
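+        # On success, returns a dict like (values are examples):
+        # {'returncode': 0, 'oc': 'v3.3.0.33', 'oc_numeric': '3.3.0.33',
+        #  'oc_short': '3.3', 'openshift': 'v3.3.0.33', ...}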
+
+ results = {}
+
+ version_results = self._version()
+
+ if version_results['returncode'] == 0:
+ filtered_vers = Utils.filter_versions(version_results['results'])
+ custom_vers = Utils.add_custom_versions(filtered_vers)
+
+ results['returncode'] = version_results['returncode']
+ results.update(filtered_vers)
+ results.update(custom_vers)
+
+ return results
+
+ raise OpenShiftCLIError('Problem detecting openshift version.')
+
+ @staticmethod
+ def run_ansible(params):
+ '''run the idempotent ansible code'''
+ oc_version = OCVersion(params['kubeconfig'], params['debug'])
+
+ if params['state'] == 'list':
+
+ #pylint: disable=protected-access
+ result = oc_version.get()
+ return {'state': params['state'],
+ 'results': result,
+ 'changed': False}
+
+# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_adm_registry.py -*- -*- -*-
+
+class RegistryException(Exception):
+ ''' Registry Exception Class '''
+ pass
+
+
+class RegistryConfig(OpenShiftCLIConfig):
+ ''' RegistryConfig is a DTO for the registry. '''
+ def __init__(self, rname, namespace, kubeconfig, registry_options):
+ super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options)
+
+
+class Registry(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
+ volume_path = 'spec.template.spec.volumes'
+ env_path = 'spec.template.spec.containers[0].env'
+
+ def __init__(self,
+ registry_config,
+ verbose=False):
+ ''' Constructor for Registry
+
+        a registry consists of at least the following parts:
+ - dc/docker-registry
+ - svc/docker-registry
+
+ Parameters:
+ :registry_config:
+ :verbose:
+ '''
+ super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
+ self.version = OCVersion(registry_config.kubeconfig, verbose)
+ self.svc_ip = None
+ self.portal_ip = None
+ self.config = registry_config
+ self.verbose = verbose
+ self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
+ {'kind': 'svc', 'name': self.config.name},
+ ]
+
+ self.__prepared_registry = None
+ self.volume_mounts = []
+ self.volumes = []
+ if self.config.config_options['volume_mounts']['value']:
+ for volume in self.config.config_options['volume_mounts']['value']:
+ volume_info = {'secret_name': volume.get('secret_name', None),
+ 'name': volume.get('name', None),
+ 'type': volume.get('type', None),
+ 'path': volume.get('path', None),
+ 'claimName': volume.get('claim_name', None),
+ 'claimSize': volume.get('claim_size', None),
+ }
+
+ vol, vol_mount = Volume.create_volume_structure(volume_info)
+ self.volumes.append(vol)
+ self.volume_mounts.append(vol_mount)
+
+ self.dconfig = None
+ self.svc = None
+
+ @property
+ def deploymentconfig(self):
+ ''' deploymentconfig property '''
+ return self.dconfig
+
+ @deploymentconfig.setter
+ def deploymentconfig(self, config):
+ ''' setter for deploymentconfig property '''
+ self.dconfig = config
+
+ @property
+ def service(self):
+ ''' service property '''
+ return self.svc
+
+ @service.setter
+ def service(self, config):
+ ''' setter for service property '''
+ self.svc = config
+
+ @property
+ def prepared_registry(self):
+ ''' prepared_registry property '''
+ if not self.__prepared_registry:
+ results = self.prepare_registry()
+ if not results:
+ raise RegistryException('Could not perform registry preparation.')
+ self.__prepared_registry = results
+
+ return self.__prepared_registry
+
+ @prepared_registry.setter
+ def prepared_registry(self, data):
+ ''' setter method for prepared_registry attribute '''
+ self.__prepared_registry = data
+
+ def get(self):
+ ''' return the self.registry_parts '''
+ self.deploymentconfig = None
+ self.service = None
+
+ rval = 0
+ for part in self.registry_parts:
+ result = self._get(part['kind'], rname=part['name'])
+ if result['returncode'] == 0 and part['kind'] == 'dc':
+ self.deploymentconfig = DeploymentConfig(result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'svc':
+ self.service = Service(result['results'][0])
+
+ if result['returncode'] != 0:
+ rval = result['returncode']
+
+
+ return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service}
+
+ def exists(self):
+ '''does the object exist?'''
+ self.get()
+ if self.deploymentconfig and self.service:
+ return True
+
+ return False
+
+ def delete(self, complete=True):
+        '''delete the registry components '''
+ parts = []
+ for part in self.registry_parts:
+ if not complete and part['kind'] == 'svc':
+ continue
+ parts.append(self._delete(part['kind'], part['name']))
+
+ # Clean up returned results
+ rval = 0
+ for part in parts:
+ # pylint: disable=invalid-sequence-index
+ if 'returncode' in part and part['returncode'] != 0:
+ rval = part['returncode']
+
+ return {'returncode': rval, 'results': parts}
+
+ def prepare_registry(self):
+ ''' prepare a registry for instantiation '''
+ options = self.config.to_option_list()
+
+ cmd = ['registry', '-n', self.config.namespace]
+ cmd.extend(options)
+ cmd.extend(['--dry-run=True', '-o', 'json'])
+
+ results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
+ # probably need to parse this
+ # pylint thinks results is a string
+ # pylint: disable=no-member
+        if results['returncode'] != 0 and 'items' not in results['results']:
+ return results
+
+ service = None
+ deploymentconfig = None
+ # pylint: disable=invalid-sequence-index
+ for res in results['results']['items']:
+ if res['kind'] == 'DeploymentConfig':
+ deploymentconfig = DeploymentConfig(res)
+ elif res['kind'] == 'Service':
+ service = Service(res)
+
+ # Verify we got a service and a deploymentconfig
+ if not service or not deploymentconfig:
+ return results
+
+ # results will need to get parsed here and modifications added
+ deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))
+
+ # modify service ip
+ if self.svc_ip:
+ service.put('spec.clusterIP', self.svc_ip)
+ if self.portal_ip:
+ service.put('spec.portalIP', self.portal_ip)
+
+ # the dry-run doesn't apply the selector correctly
+        if self.service:
+            service.put('spec.selector', self.service.get_selector())
+
+ # need to create the service and the deploymentconfig
+ service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
+ deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
+
+ return {"service": service,
+ "service_file": service_file,
+ "service_update": False,
+ "deployment": deploymentconfig,
+ "deployment_file": deployment_file,
+ "deployment_update": False}
+
+ def create(self):
+ '''Create a registry'''
+ results = []
+ self.needs_update()
+ # if the object is none, then we need to create it
+ # if the object needs an update, then we should call replace
+ # Handle the deploymentconfig
+ if self.deploymentconfig is None:
+ results.append(self._create(self.prepared_registry['deployment_file']))
+ elif self.prepared_registry['deployment_update']:
+ results.append(self._replace(self.prepared_registry['deployment_file']))
+
+ # Handle the service
+ if self.service is None:
+ results.append(self._create(self.prepared_registry['service_file']))
+ elif self.prepared_registry['service_update']:
+ results.append(self._replace(self.prepared_registry['service_file']))
+
+ # Clean up returned results
+ rval = 0
+ for result in results:
+ # pylint: disable=invalid-sequence-index
+ if 'returncode' in result and result['returncode'] != 0:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ def update(self):
+ '''run update for the registry. This performs a replace if required'''
+ # Store the current service IP
+ if self.service:
+ svcip = self.service.get('spec.clusterIP')
+ if svcip:
+ self.svc_ip = svcip
+ portip = self.service.get('spec.portalIP')
+ if portip:
+ self.portal_ip = portip
+
+ results = []
+ if self.prepared_registry['deployment_update']:
+ results.append(self._replace(self.prepared_registry['deployment_file']))
+ if self.prepared_registry['service_update']:
+ results.append(self._replace(self.prepared_registry['service_file']))
+
+ # Clean up returned results
+ rval = 0
+ for result in results:
+ if result['returncode'] != 0:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ def add_modifications(self, deploymentconfig):
+ ''' update a deployment config with changes '''
+ # The environment variable for REGISTRY_HTTP_SECRET is autogenerated
+ # We should set the generated deploymentconfig to the in memory version
+ # the following modifications will overwrite if needed
+ if self.deploymentconfig:
+ result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
+ if result:
+ deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
+
+ # Currently we know that our deployment of a registry requires a few extra modifications
+ # Modification 1
+ # we need specific environment variables to be set
+ for key, value in self.config.config_options['env_vars'].get('value', {}).items():
+ if not deploymentconfig.exists_env_key(key):
+ deploymentconfig.add_env_value(key, value)
+ else:
+ deploymentconfig.update_env_var(key, value)
+
+ # Modification 2
+ # we need specific volume variables to be set
+ for volume in self.volumes:
+ deploymentconfig.update_volume(volume)
+
+ for vol_mount in self.volume_mounts:
+ deploymentconfig.update_volume_mount(vol_mount)
+
+ # Modification 3
+ # Edits
+ edit_results = []
+ for edit in self.config.config_options['edits'].get('value', []):
+ if edit['action'] == 'put':
+ edit_results.append(deploymentconfig.put(edit['key'],
+ edit['value']))
+ if edit['action'] == 'update':
+ edit_results.append(deploymentconfig.update(edit['key'],
+ edit['value'],
+ edit.get('index', None),
+ edit.get('curr_value', None)))
+ if edit['action'] == 'append':
+ edit_results.append(deploymentconfig.append(edit['key'],
+ edit['value']))
+
+ if edit_results and not any([res[0] for res in edit_results]):
+ return None
+
+ return deploymentconfig.yaml_dict
+
+ def needs_update(self):
+ ''' check to see if we need to update '''
+ exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
+ if self.service is None or \
+ not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
+ self.service.yaml_dict,
+ exclude_list,
+ debug=self.verbose):
+ self.prepared_registry['service_update'] = True
+
+ exclude_list = ['dnsPolicy',
+ 'terminationGracePeriodSeconds',
+ 'restartPolicy', 'timeoutSeconds',
+ 'livenessProbe', 'readinessProbe',
+ 'terminationMessagePath',
+ 'securityContext',
+ 'imagePullPolicy',
+ 'protocol', # ports.portocol: TCP
+ 'type', # strategy: {'type': 'rolling'}
+ 'defaultMode', # added on secrets
+ 'activeDeadlineSeconds', # added in 1.5 for timeouts
+ ]
+
+ if self.deploymentconfig is None or \
+ not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
+ self.deploymentconfig.yaml_dict,
+ exclude_list,
+ debug=self.verbose):
+ self.prepared_registry['deployment_update'] = True
+
+        return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update']
+
+ # In the future, we would like to break out each ansible state into a function.
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run idempotent ansible code'''
+
+ rconfig = RegistryConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ {'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'mount_host': {'value': params['mount_host'], 'include': True},
+ 'env_vars': {'value': params['env_vars'], 'include': False},
+ 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
+ 'edits': {'value': params['edits'], 'include': False},
+ 'enforce_quota': {'value': params['enforce_quota'], 'include': True},
+ 'daemonset': {'value': params['daemonset'], 'include': True},
+ 'tls_key': {'value': params['tls_key'], 'include': True},
+ 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
+ })
+
+
+ ocregistry = Registry(rconfig, params['debug'])
+
+ api_rval = ocregistry.get()
+
+ state = params['state']
+ ########
+ # get
+ ########
+ if state == 'list':
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not ocregistry.exists():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ # Unsure as to why this is angry with the return type.
+ # pylint: disable=redefined-variable-type
+ api_rval = ocregistry.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not ocregistry.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ api_rval = ocregistry.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if not params['force'] and not ocregistry.needs_update():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocregistry.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'failed': True, 'msg': 'Unknown state passed. %s' % state}
+
+# -*- -*- -*- End included fragment: class/oc_adm_registry.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_adm_registry.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for registry
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+                       choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, required=True, type='str'),
+
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ images=dict(default=None, type='str'),
+ latest_images=dict(default=False, type='bool'),
+ labels=dict(default=None, type='list'),
+ ports=dict(default=['5000'], type='list'),
+ replicas=dict(default=1, type='int'),
+ selector=dict(default=None, type='str'),
+ service_account=dict(default='registry', type='str'),
+ mount_host=dict(default=None, type='str'),
+ volume_mounts=dict(default=None, type='list'),
+ env_vars=dict(default={}, type='dict'),
+ edits=dict(default=[], type='list'),
+ enforce_quota=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool'),
+ daemonset=dict(default=False, type='bool'),
+ tls_key=dict(default=None, type='str'),
+ tls_certificate=dict(default=None, type='str'),
+ ),
+
+ supports_check_mode=True,
+ )
+
+ results = Registry.run_ansible(module.params, module.check_mode)
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_adm_registry.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
new file mode 100644
index 000000000..bb4ce5e70
--- /dev/null
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -0,0 +1,3071 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/router -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_adm_router
+short_description: Module to manage openshift router
+description:
+ - Manage openshift router programmatically.
+options:
+ state:
+ description:
+ - Whether to create or delete the router
+ - present - create the router
+ - absent - remove the router
+ - list - return the current representation of a router
+ required: false
+ default: present
+ choices:
+ - present
+    - absent
+    - list
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - The name of the router
+ required: false
+ default: router
+ aliases: []
+ namespace:
+ description:
+ - The namespace where to manage the router.
+ required: false
+ default: default
+ aliases: []
+ images:
+ description:
+ - The image to base this router on - ${component} will be replaced with --type
+    required: false
+    default: 'openshift3/ose-${component}:${version}'
+ aliases: []
+ latest_images:
+ description:
+    - If true, attempt to use the latest image for the router instead of the latest release.
+ required: false
+ default: False
+ aliases: []
+ labels:
+ description:
+    - A set of labels to uniquely identify the router and its components.
+ required: false
+ default: None
+ aliases: []
+ ports:
+ description:
+ - A list of strings in the 'port:port' format
+ required: False
+ default:
+ - 80:80
+ - 443:443
+ aliases: []
+ replicas:
+ description:
+    - The replication factor of the router; commonly 2 when high availability is desired.
+ required: False
+ default: 1
+ aliases: []
+ selector:
+ description:
+ - Selector used to filter nodes on deployment. Used to run routers on a specific set of nodes.
+ required: False
+ default: None
+ aliases: []
+ service_account:
+ description:
+ - Name of the service account to use to run the router pod.
+ required: False
+ default: router
+ aliases: []
+ router_type:
+ description:
+ - The router image to use - if you specify --images this flag may be ignored.
+ required: false
+ default: haproxy-router
+ aliases: []
+ external_host:
+ description:
+ - If the underlying router implementation connects with an external host, this is the external host's hostname.
+ required: false
+ default: None
+ aliases: []
+ external_host_vserver:
+ description:
+ - If the underlying router implementation uses virtual servers, this is the name of the virtual server for HTTP connections.
+ required: false
+ default: None
+ aliases: []
+ external_host_insecure:
+ description:
+ - If the underlying router implementation connects with an external host
+ - over a secure connection, this causes the router to skip strict certificate verification with the external host.
+ required: false
+ default: False
+ aliases: []
+ external_host_partition_path:
+ description:
+ - If the underlying router implementation uses partitions for control boundaries, this is the path to use for that partition.
+ required: false
+ default: None
+ aliases: []
+ external_host_username:
+ description:
+ - If the underlying router implementation connects with an external host, this is the username for authenticating with the external host.
+ required: false
+ default: None
+ aliases: []
+ external_host_password:
+ description:
+ - If the underlying router implementation connects with an external host, this is the password for authenticating with the external host.
+ required: false
+ default: None
+ aliases: []
+ external_host_private_key:
+ description:
+ - If the underlying router implementation requires an SSH private key, this is the path to the private key file.
+ required: false
+ default: None
+ aliases: []
+ expose_metrics:
+ description:
+ - This is a hint to run an extra container in the pod to expose metrics - the image
+ - will either be set depending on the router implementation or provided with --metrics-image.
+ required: false
+ default: False
+ aliases: []
+ metrics_image:
+ description:
+ - If expose_metrics is specified this is the image to use to run a sidecar container
+ - in the pod exposing metrics. If not set and --expose-metrics is true the image will
+ - depend on router implementation.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+notes:
+- There are some exceptions to note when checking idempotency in this module.
+- The strategy is to use the oc adm router command to generate a default
+- configuration when creating or updating a router. Often there are
+- differences between the generated template and what is in memory in openshift.
+- We make exceptions so that these specific values are not checked when comparing objects.
+- Here is a list of the exceptions:
+- - DeploymentConfig:
+ - dnsPolicy
+ - terminationGracePeriodSeconds
+ - restartPolicy
+ - timeoutSeconds
+ - livenessProbe
+ - readinessProbe
+ - terminationMessagePath
+ - hostPort
+ - defaultMode
+ - Service:
+ - portalIP
+ - clusterIP
+ - sessionAffinity
+ - type
+ - ServiceAccount:
+ - secrets
+ - imagePullSecrets
+'''
+
+EXAMPLES = '''
+- name: create routers
+ oc_adm_router:
+ name: router
+ service_account: router
+ replicas: 2
+ namespace: default
+ selector: type=infra
+ cert_file: /etc/origin/master/named_certificates/router.crt
+ key_file: /etc/origin/master/named_certificates/router.key
+ cacert_file: /etc/origin/master/named_certificates/router.ca
+ edits:
+ - key: spec.strategy.rollingParams
+ value:
+ intervalSeconds: 1
+ maxSurge: 50%
+ maxUnavailable: 50%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ action: put
+ - key: spec.template.spec.containers[0].resources.limits.memory
+ value: 2G
+ action: put
+ - key: spec.template.spec.containers[0].resources.requests.memory
+ value: 1G
+ action: put
+ - key: spec.template.spec.containers[0].env
+ value:
+ name: EXTENDED_VALIDATION
+ value: 'false'
+ action: update
+ register: router_out
+ run_once: True
+'''
+
+# -*- -*- -*- End included fragment: doc/router -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+        ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
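+        # Example (illustrative): 'spec.containers[0].env' parses to
+        # [('', 'spec'), ('', 'containers'), ('0', ''), ('', 'env')],
+        # i.e. (array_index, dict_key) pairs consumed by get/add/remove_entry.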
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+        ''' update the value at path; for lists, index or curr_value selects the entry '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
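+        # Example (illustrative): put('spec.replicas', 3) returns
+        # (True, <updated dict>), or (False, <unchanged dict>) when the
+        # value is already set.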
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
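+        # Example (illustrative): get_curr_value('{"a": 1}', 'json') -> {'a': 1}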
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+        # The value came in as a string but value_type requests a bool;
+        # validate it against the accepted spellings above (the yaml.load
+        # call below performs the actual conversion).
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+                        'msg': 'Error opening file [%s]. Verify that the '
+                               'file exists, that it has correct '
+                               'permissions, and is valid yaml.' % module.params['src']}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+
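+# Illustrative return shapes from Yedit.run_ansible, summarizing the code
+# above (parameter values assumed):
+#   state=list    -> {'changed': False, 'result': <content or value at key>, 'state': 'list'}
+#   state=absent  -> {'changed': <bool>, 'result': <yaml dict>, 'state': 'absent'}
+#   state=present -> {'changed': <bool>, 'result': <yaml dict>, 'state': 'present'}
+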
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
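+# Illustrative usage (assumed): on Python 3 locate_oc_binary() defers to
+# shutil.which, while on Python 2 (no shutil.which) it falls back to the
+# naive path scan above.
+#
+#   oc_path = locate_oc_binary()  # e.g. '/usr/local/bin/oc' when found
+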
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node --schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # noqa: E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+                    except ValueError as verr:
+                        # bind to a new name: `as err` would unbind the outer
+                        # `err` when the handler exits on Python 3
+                        if "No JSON object could be decoded" in verr.args:
+                            err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
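+# Illustrative shape of the dict openshift_cmd() returns (derived from the
+# code above; values assumed):
+#   success: {'returncode': 0, 'results': <parsed json or raw stdout>,
+#             'cmd': 'oc -n default get pods -o json'}
+#   failure: {'returncode': <rc>, 'results': {}, 'cmd': '...',
+#             'stderr': <stderr>, 'stdout': <stdout>}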
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+        with open(inc_file) as fsrc:
+            Utils._write(tmpfile, fsrc.read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+        ''' return the parsed contents of a resource file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+        for line in stdout.strip().split('\n'):
+            if not line:
+                continue
+            for term in version_search:
+                if line.startswith(term):
+                    version_dict[term] = line.split()[-1]
+
+        # Horrible hack to get the openshift version in OpenShift 3.2:
+        # by default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
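+    # Illustrative example (input assumed): for {'oc': 'v3.3.0.33-1+abc'} the
+    # result is {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}; the '-' build
+    # suffix is dropped and the leading 'v' stripped.
+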
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+                        if not result:
+                            if debug:
+                                print('list compare returned false')
+                            return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
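+    # Illustrative comparison (dicts assumed): 'metadata' and 'status' (plus
+    # any skip_keys) are ignored, so
+    #   user = {'spec': {'replicas': 1}}
+    #   api  = {'spec': {'replicas': 1}, 'status': {}, 'metadata': {}}
+    #   Utils.check_def_equal(user, api)  # -> True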
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
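+    # Illustrative example (option dict shape assumed, as RouterConfig uses):
+    #   opts = {'replicas': {'value': 2, 'include': True},
+    #           'service_account': {'value': 'router', 'include': True},
+    #           'cert_file': {'value': None, 'include': False}}
+    #   OpenShiftCLIConfig('r', 'default', '/tmp/kc', opts).stringify()
+    #   -> ['--replicas=2', '--service-account=router']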
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class ServiceConfig(object):
+ ''' Handle service options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ ports,
+ selector=None,
+ labels=None,
+ cluster_ip=None,
+ portal_ip=None,
+ session_affinity=None,
+ service_type=None):
+ ''' constructor for handling service options '''
+ self.name = sname
+ self.namespace = namespace
+ self.ports = ports
+ self.selector = selector
+ self.labels = labels
+ self.cluster_ip = cluster_ip
+ self.portal_ip = portal_ip
+ self.session_affinity = session_affinity
+ self.service_type = service_type
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' instantiates a service dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Service'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+ if self.labels:
+ for lab, lab_value in self.labels.items():
+ self.data['metadata'][lab] = lab_value
+ self.data['spec'] = {}
+
+ if self.ports:
+ self.data['spec']['ports'] = self.ports
+ else:
+ self.data['spec']['ports'] = []
+
+ if self.selector:
+ self.data['spec']['selector'] = self.selector
+
+ self.data['spec']['sessionAffinity'] = self.session_affinity or 'None'
+
+ if self.cluster_ip:
+ self.data['spec']['clusterIP'] = self.cluster_ip
+
+ if self.portal_ip:
+ self.data['spec']['portalIP'] = self.portal_ip
+
+ if self.service_type:
+ self.data['spec']['type'] = self.service_type
+
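+# Illustrative construction (values assumed):
+#   ServiceConfig('router', 'default', ports=[{'port': 80, 'targetPort': 80}]).data
+# yields a v1 Service dict with metadata.name 'router', the given spec.ports,
+# and sessionAffinity defaulted to 'None'.
+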
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class Service(Yedit):
+ ''' Class to model the oc service object '''
+ port_path = "spec.ports"
+ portal_ip = "spec.portalIP"
+ cluster_ip = "spec.clusterIP"
+ selector_path = 'spec.selector'
+ kind = 'Service'
+
+ def __init__(self, content):
+ '''Service constructor'''
+ super(Service, self).__init__(content=content)
+
+ def get_ports(self):
+ ''' get a list of ports '''
+ return self.get(Service.port_path) or []
+
+ def get_selector(self):
+ ''' get the service selector'''
+ return self.get(Service.selector_path) or {}
+
+ def add_ports(self, inc_ports):
+ ''' add a port object to the ports list '''
+ if not isinstance(inc_ports, list):
+ inc_ports = [inc_ports]
+
+ ports = self.get_ports()
+ if not ports:
+ self.put(Service.port_path, inc_ports)
+ else:
+ ports.extend(inc_ports)
+
+ return True
+
+ def find_ports(self, inc_port):
+ ''' find a specific port '''
+ for port in self.get_ports():
+ if port['port'] == inc_port['port']:
+ return port
+
+ return None
+
+ def delete_ports(self, inc_ports):
+ ''' remove a port from a service '''
+ if not isinstance(inc_ports, list):
+ inc_ports = [inc_ports]
+
+ ports = self.get(Service.port_path) or []
+
+ if not ports:
+ return True
+
+ removed = False
+ for inc_port in inc_ports:
+ port = self.find_ports(inc_port)
+ if port:
+ ports.remove(port)
+ removed = True
+
+ return removed
+
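+    # Illustrative port matching (values assumed): find_ports compares only
+    # the 'port' field, so delete_ports({'port': 80}) removes an existing
+    # {'port': 80, 'targetPort': 8080} entry.
+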
+ def add_cluster_ip(self, sip):
+ '''add cluster ip'''
+ self.put(Service.cluster_ip, sip)
+
+ def add_portal_ip(self, pip):
+        '''add portal ip'''
+ self.put(Service.portal_ip, pip)
+
+# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
+
+
+# pylint: disable=too-many-public-methods
+class DeploymentConfig(Yedit):
+ ''' Class to model an openshift DeploymentConfig'''
+ default_deployment_config = '''
+apiVersion: v1
+kind: DeploymentConfig
+metadata:
+ name: default_dc
+ namespace: default
+spec:
+ replicas: 0
+ selector:
+ default_dc: default_dc
+ strategy:
+ resources: {}
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 0
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePercent: -25
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ spec:
+ containers:
+ - env:
+ - name: default
+ value: default
+ image: default
+ imagePullPolicy: IfNotPresent
+ name: default_dc
+ ports:
+ - containerPort: 8000
+ hostPort: 8000
+ protocol: TCP
+ name: default_port
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ hostNetwork: true
+ nodeSelector:
+ type: compute
+ restartPolicy: Always
+ securityContext: {}
+ serviceAccount: default
+ serviceAccountName: default
+ terminationGracePeriodSeconds: 30
+ triggers:
+ - type: ConfigChange
+'''
+
+ replicas_path = "spec.replicas"
+ env_path = "spec.template.spec.containers[0].env"
+ volumes_path = "spec.template.spec.volumes"
+ container_path = "spec.template.spec.containers"
+ volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
+
+ def __init__(self, content=None):
+ ''' Constructor for deploymentconfig '''
+ if not content:
+ content = DeploymentConfig.default_deployment_config
+
+ super(DeploymentConfig, self).__init__(content=content)
+
+ def add_env_value(self, key, value):
+ ''' add key, value pair to env array '''
+ rval = False
+ env = self.get_env_vars()
+ if env:
+ env.append({'name': key, 'value': value})
+ rval = True
+ else:
+ result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
+ rval = result[0]
+
+ return rval
+
+ def exists_env_value(self, key, value):
+ ''' return whether a key, value pair exists '''
+ results = self.get_env_vars()
+ if not results:
+ return False
+
+ for result in results:
+ if result['name'] == key and result['value'] == value:
+ return True
+
+ return False
+
+ def exists_env_key(self, key):
+        ''' return whether a key exists '''
+ results = self.get_env_vars()
+ if not results:
+ return False
+
+ for result in results:
+ if result['name'] == key:
+ return True
+
+ return False
+
+ def get_env_var(self, key):
+        '''return an environment variable by name '''
+ results = self.get(DeploymentConfig.env_path) or []
+ if not results:
+ return None
+
+ for env_var in results:
+ if env_var['name'] == key:
+ return env_var
+
+ return None
+
+ def get_env_vars(self):
+        '''return the environment variables '''
+ return self.get(DeploymentConfig.env_path) or []
+
+ def delete_env_var(self, keys):
+ '''delete a list of keys '''
+ if not isinstance(keys, list):
+ keys = [keys]
+
+        env_vars_array = self.get_env_vars()
+        modified = False
+        for key in keys:
+            # reset per key; `if idx:` would skip index 0, and a stale index
+            # from a previous key would delete the wrong entry
+            idx = None
+            for env_idx, env_var in enumerate(env_vars_array):
+                if env_var['name'] == key:
+                    idx = env_idx
+                    break
+
+            if idx is not None:
+                modified = True
+                del env_vars_array[idx]
+
+        return modified
+
+ def update_env_var(self, key, value):
+ '''place an env in the env var list'''
+
+ env_vars_array = self.get_env_vars()
+ idx = None
+ for env_idx, env_var in enumerate(env_vars_array):
+ if env_var['name'] == key:
+ idx = env_idx
+ break
+
+        if idx is not None:
+ env_vars_array[idx]['value'] = value
+ else:
+ self.add_env_value(key, value)
+
+ return True
+
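+    # Illustrative env-var flow (values assumed): update_env_var('FOO', 'bar')
+    # rewrites the value in place when a FOO entry already exists in
+    # spec.template.spec.containers[0].env, otherwise it falls through to
+    # add_env_value and appends {'name': 'FOO', 'value': 'bar'}.
+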
+ def exists_volume_mount(self, volume_mount):
+ ''' return whether a volume mount exists '''
+ exist_volume_mounts = self.get_volume_mounts()
+
+ if not exist_volume_mounts:
+ return False
+
+ volume_mount_found = False
+ for exist_volume_mount in exist_volume_mounts:
+ if exist_volume_mount['name'] == volume_mount['name']:
+ volume_mount_found = True
+ break
+
+ return volume_mount_found
+
+ def exists_volume(self, volume):
+ ''' return whether a volume exists '''
+ exist_volumes = self.get_volumes()
+
+ volume_found = False
+ for exist_volume in exist_volumes:
+ if exist_volume['name'] == volume['name']:
+ volume_found = True
+ break
+
+ return volume_found
+
+ def find_volume_by_name(self, volume, mounts=False):
+ ''' return the index of a volume '''
+ volumes = []
+ if mounts:
+ volumes = self.get_volume_mounts()
+ else:
+ volumes = self.get_volumes()
+ for exist_volume in volumes:
+ if exist_volume['name'] == volume['name']:
+ return exist_volume
+
+ return None
+
+ def get_replicas(self):
+ ''' return replicas setting '''
+ return self.get(DeploymentConfig.replicas_path)
+
+ def get_volume_mounts(self):
+ '''return volume mount information '''
+ return self.get_volumes(mounts=True)
+
+ def get_volumes(self, mounts=False):
+ '''return volume mount information '''
+ if mounts:
+ return self.get(DeploymentConfig.volume_mounts_path) or []
+
+ return self.get(DeploymentConfig.volumes_path) or []
+
+ def delete_volume_by_name(self, volume):
+ '''delete a volume '''
+ modified = False
+ exist_volume_mounts = self.get_volume_mounts()
+ exist_volumes = self.get_volumes()
+ del_idx = None
+ for idx, exist_volume in enumerate(exist_volumes):
+ if 'name' in exist_volume and exist_volume['name'] == volume['name']:
+ del_idx = idx
+ break
+
+        if del_idx is not None:
+            del exist_volumes[del_idx]
+            modified = True
+
+        del_idx = None
+        for idx, exist_volume_mount in enumerate(exist_volume_mounts):
+            if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
+                del_idx = idx
+                break
+
+        if del_idx is not None:
+            del exist_volume_mounts[del_idx]
+            modified = True
+
+ return modified
+
+ def add_volume_mount(self, volume_mount):
+ ''' add a volume or volume mount to the proper location '''
+ exist_volume_mounts = self.get_volume_mounts()
+
+ if not exist_volume_mounts and volume_mount:
+ self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
+ else:
+ exist_volume_mounts.append(volume_mount)
+
+ def add_volume(self, volume):
+ ''' add a volume or volume mount to the proper location '''
+ exist_volumes = self.get_volumes()
+ if not volume:
+ return
+
+ if not exist_volumes:
+ self.put(DeploymentConfig.volumes_path, [volume])
+ else:
+ exist_volumes.append(volume)
+
+ def update_replicas(self, replicas):
+ ''' update replicas value '''
+ self.put(DeploymentConfig.replicas_path, replicas)
+
+ def update_volume(self, volume):
+        '''update a volume in the volumes list'''
+ exist_volumes = self.get_volumes()
+
+ if not volume:
+ return False
+
+ # update the volume
+ update_idx = None
+ for idx, exist_vol in enumerate(exist_volumes):
+ if exist_vol['name'] == volume['name']:
+ update_idx = idx
+ break
+
+        if update_idx is not None:
+ exist_volumes[update_idx] = volume
+ else:
+ self.add_volume(volume)
+
+ return True
+
+ def update_volume_mount(self, volume_mount):
+        '''update a volume mount in the volume mounts list'''
+ modified = False
+
+ exist_volume_mounts = self.get_volume_mounts()
+
+ if not volume_mount:
+ return False
+
+ # update the volume mount
+ for exist_vol_mount in exist_volume_mounts:
+ if exist_vol_mount['name'] == volume_mount['name']:
+ if 'mountPath' in exist_vol_mount and \
+ str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
+ exist_vol_mount['mountPath'] = volume_mount['mountPath']
+ modified = True
+ break
+
+ if not modified:
+ self.add_volume_mount(volume_mount)
+ modified = True
+
+ return modified
+
+ def needs_update_volume(self, volume, volume_mount):
+ ''' verify a volume update is needed '''
+ exist_volume = self.find_volume_by_name(volume)
+ exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
+ results = []
+ results.append(exist_volume['name'] == volume['name'])
+
+ if 'secret' in volume:
+ results.append('secret' in exist_volume)
+ results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
+ results.append(exist_volume_mount['name'] == volume_mount['name'])
+ results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
+
+ elif 'emptyDir' in volume:
+ results.append(exist_volume_mount['name'] == volume['name'])
+ results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
+
+ elif 'persistentVolumeClaim' in volume:
+ pvc = 'persistentVolumeClaim'
+ results.append(pvc in exist_volume)
+ if results[-1]:
+ results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
+
+ if 'claimSize' in volume[pvc]:
+ results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
+
+ elif 'hostpath' in volume:
+ results.append('hostPath' in exist_volume)
+ results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
+
+ return not all(results)
+
+ def needs_update_replicas(self, replicas):
+ ''' verify whether a replica update is needed '''
+ current_reps = self.get(DeploymentConfig.replicas_path)
+        return current_reps != replicas
+
+# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
+
+class ServiceAccountConfig(object):
+ '''Service account config class
+
+ This class stores the options and returns a default service account
+ '''
+
+ # pylint: disable=too-many-arguments
+ def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
+ self.name = sname
+ self.kubeconfig = kubeconfig
+ self.namespace = namespace
+ self.secrets = secrets or []
+ self.image_pull_secrets = image_pull_secrets or []
+ self.data = {}
+ self.create_dict()
+
+ def create_dict(self):
+        ''' instantiate a properly structured service account dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'ServiceAccount'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+
+ self.data['secrets'] = []
+ if self.secrets:
+ for sec in self.secrets:
+ self.data['secrets'].append({"name": sec})
+
+ self.data['imagePullSecrets'] = []
+ if self.image_pull_secrets:
+ for sec in self.image_pull_secrets:
+ self.data['imagePullSecrets'].append({"name": sec})
+
+class ServiceAccount(Yedit):
+    ''' Class to model the oc serviceaccount object '''
+ image_pull_secrets_path = "imagePullSecrets"
+ secrets_path = "secrets"
+
+ def __init__(self, content):
+ '''ServiceAccount constructor'''
+ super(ServiceAccount, self).__init__(content=content)
+ self._secrets = None
+ self._image_pull_secrets = None
+
+ @property
+ def image_pull_secrets(self):
+ ''' property for image_pull_secrets '''
+ if self._image_pull_secrets is None:
+ self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
+ return self._image_pull_secrets
+
+ @image_pull_secrets.setter
+ def image_pull_secrets(self, secrets):
+        ''' setter for image_pull_secrets '''
+ self._image_pull_secrets = secrets
+
+ @property
+ def secrets(self):
+ ''' property for secrets '''
+        if self._secrets is None:
+ self._secrets = self.get(ServiceAccount.secrets_path) or []
+ return self._secrets
+
+ @secrets.setter
+ def secrets(self, secrets):
+        ''' setter for secrets '''
+ self._secrets = secrets
+
+ def delete_secret(self, inc_secret):
+ ''' remove a secret '''
+ remove_idx = None
+ for idx, sec in enumerate(self.secrets):
+ if sec['name'] == inc_secret:
+ remove_idx = idx
+ break
+
+        if remove_idx is not None:
+ del self.secrets[remove_idx]
+ return True
+
+ return False
+
+ def delete_image_pull_secret(self, inc_secret):
+ ''' remove a image_pull_secret '''
+ remove_idx = None
+ for idx, sec in enumerate(self.image_pull_secrets):
+ if sec['name'] == inc_secret:
+ remove_idx = idx
+ break
+
+        if remove_idx is not None:
+ del self.image_pull_secrets[remove_idx]
+ return True
+
+ return False
+
+ def find_secret(self, inc_secret):
+ '''find secret'''
+ for secret in self.secrets:
+ if secret['name'] == inc_secret:
+ return secret
+
+ return None
+
+ def find_image_pull_secret(self, inc_secret):
+ '''find secret'''
+ for secret in self.image_pull_secrets:
+ if secret['name'] == inc_secret:
+ return secret
+
+ return None
+
+ def add_secret(self, inc_secret):
+ '''add secret'''
+ if self.secrets:
+ self.secrets.append({"name": inc_secret}) # pylint: disable=no-member
+ else:
+ self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])
+
+ def add_image_pull_secret(self, inc_secret):
+ '''add image_pull_secret'''
+ if self.image_pull_secrets:
+ self.image_pull_secrets.append({"name": inc_secret}) # pylint: disable=no-member
+ else:
+ self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
+
+# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class SecretConfig(object):
+ ''' Handle secret options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig,
+ secrets=None):
+ ''' constructor for handling secret options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.secrets = secrets
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' assign the correct properties for a secret dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Secret'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+ self.data['data'] = {}
+ if self.secrets:
+ for key, value in self.secrets.items():
+ self.data['data'][key] = value
+
+# pylint: disable=too-many-instance-attributes
+class Secret(Yedit):
+    ''' Class to model the oc secret object '''
+ secret_path = "data"
+ kind = 'secret'
+
+ def __init__(self, content):
+ '''secret constructor'''
+ super(Secret, self).__init__(content=content)
+ self._secrets = None
+
+ @property
+ def secrets(self):
+ '''secret property getter'''
+ if self._secrets is None:
+ self._secrets = self.get_secrets()
+ return self._secrets
+
+    @secrets.setter
+    def secrets(self, value):
+        '''secret property setter'''
+        self._secrets = value
+
+ def get_secrets(self):
+ ''' returns all of the defined secrets '''
+ return self.get(Secret.secret_path) or {}
+
+ def add_secret(self, key, value):
+ ''' add a secret '''
+ if self.secrets:
+ self.secrets[key] = value
+ else:
+ self.put(Secret.secret_path, {key: value})
+
+ return True
+
+ def delete_secret(self, key):
+ ''' delete secret'''
+ try:
+ del self.secrets[key]
+ except KeyError as _:
+ return False
+
+ return True
+
+ def find_secret(self, key):
+ ''' find secret'''
+ rval = None
+ try:
+ rval = self.secrets[key]
+ except KeyError as _:
+ return None
+
+ return {'key': key, 'value': rval}
+
+ def update_secret(self, key, value):
+ ''' update a secret'''
+ if key in self.secrets:
+ self.secrets[key] = value
+ else:
+ self.add_secret(key, value)
+
+ return True
+
+# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/rolebinding.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class RoleBindingConfig(object):
+ ''' Handle rolebinding config '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ name,
+ namespace,
+ kubeconfig,
+ group_names=None,
+ role_ref=None,
+ subjects=None,
+ usernames=None):
+ ''' constructor for handling rolebinding options '''
+ self.kubeconfig = kubeconfig
+ self.name = name
+ self.namespace = namespace
+ self.group_names = group_names
+ self.role_ref = role_ref
+ self.subjects = subjects
+ self.usernames = usernames
+ self.data = {}
+
+ self.create_dict()
+
+    def create_dict(self):
+        ''' create a default rolebinding as a dict '''
+        self.data['apiVersion'] = 'v1'
+        self.data['kind'] = 'RoleBinding'
+        self.data['groupNames'] = self.group_names
+        # metadata must be initialized before assigning into it
+        self.data['metadata'] = {}
+        self.data['metadata']['name'] = self.name
+        self.data['metadata']['namespace'] = self.namespace
+
+ self.data['roleRef'] = self.role_ref
+ self.data['subjects'] = self.subjects
+ self.data['userNames'] = self.usernames
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class RoleBinding(Yedit):
+ ''' Class to model a rolebinding openshift object'''
+ group_names_path = "groupNames"
+ role_ref_path = "roleRef"
+ subjects_path = "subjects"
+ user_names_path = "userNames"
+
+ kind = 'RoleBinding'
+
+ def __init__(self, content):
+ '''RoleBinding constructor'''
+ super(RoleBinding, self).__init__(content=content)
+ self._subjects = None
+ self._role_ref = None
+ self._group_names = None
+ self._user_names = None
+
+ @property
+ def subjects(self):
+ ''' subjects property '''
+ if self._subjects is None:
+ self._subjects = self.get_subjects()
+ return self._subjects
+
+ @subjects.setter
+ def subjects(self, data):
+ ''' subjects property setter'''
+ self._subjects = data
+
+ @property
+ def role_ref(self):
+ ''' role_ref property '''
+ if self._role_ref is None:
+ self._role_ref = self.get_role_ref()
+ return self._role_ref
+
+ @role_ref.setter
+ def role_ref(self, data):
+ ''' role_ref property setter'''
+ self._role_ref = data
+
+ @property
+ def group_names(self):
+ ''' group_names property '''
+ if self._group_names is None:
+ self._group_names = self.get_group_names()
+ return self._group_names
+
+ @group_names.setter
+ def group_names(self, data):
+ ''' group_names property setter'''
+ self._group_names = data
+
+ @property
+ def user_names(self):
+ ''' user_names property '''
+ if self._user_names is None:
+ self._user_names = self.get_user_names()
+ return self._user_names
+
+ @user_names.setter
+ def user_names(self, data):
+ ''' user_names property setter'''
+ self._user_names = data
+
+ def get_group_names(self):
+ ''' return groupNames '''
+ return self.get(RoleBinding.group_names_path) or []
+
+ def get_user_names(self):
+ ''' return usernames '''
+ return self.get(RoleBinding.user_names_path) or []
+
+ def get_role_ref(self):
+ ''' return role_ref '''
+ return self.get(RoleBinding.role_ref_path) or {}
+
+ def get_subjects(self):
+ ''' return subjects '''
+ return self.get(RoleBinding.subjects_path) or []
+
+ #### ADD #####
+ def add_subject(self, inc_subject):
+ ''' add a subject '''
+ if self.subjects:
+ # pylint: disable=no-member
+ self.subjects.append(inc_subject)
+ else:
+ self.put(RoleBinding.subjects_path, [inc_subject])
+
+ return True
+
+ def add_role_ref(self, inc_role_ref):
+ ''' add a role_ref '''
+ if not self.role_ref:
+ self.put(RoleBinding.role_ref_path, {"name": inc_role_ref})
+ return True
+
+ return False
+
+ def add_group_names(self, inc_group_names):
+ ''' add a group_names '''
+ if self.group_names:
+ # pylint: disable=no-member
+ self.group_names.append(inc_group_names)
+ else:
+ self.put(RoleBinding.group_names_path, [inc_group_names])
+
+ return True
+
+ def add_user_name(self, inc_user_name):
+ ''' add a username '''
+ if self.user_names:
+ # pylint: disable=no-member
+ self.user_names.append(inc_user_name)
+ else:
+ self.put(RoleBinding.user_names_path, [inc_user_name])
+
+ return True
+
+ #### /ADD #####
+
+    #### REMOVE #####
+ def remove_subject(self, inc_subject):
+ ''' remove a subject '''
+ try:
+ # pylint: disable=no-member
+ self.subjects.remove(inc_subject)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def remove_role_ref(self, inc_role_ref):
+ ''' remove a role_ref '''
+ if self.role_ref and self.role_ref['name'] == inc_role_ref:
+ del self.role_ref['name']
+ return True
+
+ return False
+
+ def remove_group_name(self, inc_group_name):
+ ''' remove a groupname '''
+ try:
+ # pylint: disable=no-member
+ self.group_names.remove(inc_group_name)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def remove_user_name(self, inc_user_name):
+ ''' remove a username '''
+ try:
+ # pylint: disable=no-member
+ self.user_names.remove(inc_user_name)
+ except ValueError as _:
+ return False
+
+ return True
+
+ #### /REMOVE #####
+
+ #### UPDATE #####
+ def update_subject(self, inc_subject):
+ ''' update a subject '''
+ try:
+ # pylint: disable=no-member
+ index = self.subjects.index(inc_subject)
+ except ValueError as _:
+ return self.add_subject(inc_subject)
+
+ self.subjects[index] = inc_subject
+
+ return True
+
+ def update_group_name(self, inc_group_name):
+ ''' update a groupname '''
+ try:
+ # pylint: disable=no-member
+ index = self.group_names.index(inc_group_name)
+ except ValueError as _:
+ return self.add_group_names(inc_group_name)
+
+ self.group_names[index] = inc_group_name
+
+ return True
+
+ def update_user_name(self, inc_user_name):
+ ''' update a username '''
+ try:
+ # pylint: disable=no-member
+ index = self.user_names.index(inc_user_name)
+ except ValueError as _:
+ return self.add_user_name(inc_user_name)
+
+ self.user_names[index] = inc_user_name
+
+ return True
+
+ def update_role_ref(self, inc_role_ref):
+ ''' update a role_ref '''
+ self.role_ref['name'] = inc_role_ref
+
+ return True
+
+ #### /UPDATE #####
+
+ #### FIND ####
+ def find_subject(self, inc_subject):
+ ''' find a subject '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.subjects.index(inc_subject)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_group_name(self, inc_group_name):
+ ''' find a group_name '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.group_names.index(inc_group_name)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_user_name(self, inc_user_name):
+ ''' find a user_name '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.user_names.index(inc_user_name)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_role_ref(self, inc_role_ref):
+        ''' find a role_ref '''
+ if self.role_ref and self.role_ref['name'] == inc_role_ref['name']:
+ return self.role_ref
+
+ return None
+
+# -*- -*- -*- End included fragment: lib/rolebinding.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_adm_router.py -*- -*- -*-
+
+
+class RouterException(Exception):
+ ''' Router exception'''
+ pass
+
+
+class RouterConfig(OpenShiftCLIConfig):
+ ''' RouterConfig is a DTO for the router. '''
+ def __init__(self, rname, namespace, kubeconfig, router_options):
+ super(RouterConfig, self).__init__(rname, namespace, kubeconfig, router_options)
+
+
+class Router(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ def __init__(self,
+ router_config,
+ verbose=False):
+        ''' Constructor for Router
+
+        a router consists of the following parts:
+ - dc/router
+ - svc/router
+ - sa/router
+ - secret/router-certs
+ - clusterrolebinding/router-router-role
+ '''
+ super(Router, self).__init__('default', router_config.kubeconfig, verbose)
+ self.config = router_config
+ self.verbose = verbose
+ self.router_parts = [{'kind': 'dc', 'name': self.config.name},
+ {'kind': 'svc', 'name': self.config.name},
+ {'kind': 'sa', 'name': self.config.config_options['service_account']['value']},
+ {'kind': 'secret', 'name': self.config.name + '-certs'},
+ {'kind': 'clusterrolebinding', 'name': 'router-' + self.config.name + '-role'},
+ ]
+
+ self.__prepared_router = None
+ self.dconfig = None
+ self.svc = None
+ self._secret = None
+ self._serviceaccount = None
+ self._rolebinding = None
+
+ @property
+ def prepared_router(self):
+ ''' property for the prepared router'''
+ if self.__prepared_router is None:
+ results = self._prepare_router()
+            if not results or ('returncode' in results and results['returncode'] != 0):
+ if 'stderr' in results:
+ raise RouterException('Could not perform router preparation: %s' % results['stderr'])
+
+ raise RouterException('Could not perform router preparation.')
+ self.__prepared_router = results
+
+ return self.__prepared_router
+
+ @prepared_router.setter
+ def prepared_router(self, obj):
+ '''setter for the prepared_router'''
+ self.__prepared_router = obj
+
+ @property
+ def deploymentconfig(self):
+ ''' property deploymentconfig'''
+ return self.dconfig
+
+ @deploymentconfig.setter
+ def deploymentconfig(self, config):
+ ''' setter for property deploymentconfig '''
+ self.dconfig = config
+
+ @property
+ def service(self):
+ ''' property for service '''
+ return self.svc
+
+ @service.setter
+ def service(self, config):
+ ''' setter for property service '''
+ self.svc = config
+
+ @property
+ def secret(self):
+ ''' property secret '''
+ return self._secret
+
+ @secret.setter
+ def secret(self, config):
+ ''' setter for property secret '''
+ self._secret = config
+
+ @property
+ def serviceaccount(self):
+ ''' property for serviceaccount '''
+ return self._serviceaccount
+
+ @serviceaccount.setter
+ def serviceaccount(self, config):
+ ''' setter for property serviceaccount '''
+ self._serviceaccount = config
+
+ @property
+ def rolebinding(self):
+ ''' property rolebinding '''
+ return self._rolebinding
+
+ @rolebinding.setter
+ def rolebinding(self, config):
+ ''' setter for property rolebinding '''
+ self._rolebinding = config
+
+ def get_object_by_kind(self, kind):
+ '''return the current object kind by name'''
+ if re.match("^(dc|deploymentconfig)$", kind, flags=re.IGNORECASE):
+ return self.deploymentconfig
+ elif re.match("^(svc|service)$", kind, flags=re.IGNORECASE):
+ return self.service
+ elif re.match("^(sa|serviceaccount)$", kind, flags=re.IGNORECASE):
+ return self.serviceaccount
+ elif re.match("secret", kind, flags=re.IGNORECASE):
+ return self.secret
+ elif re.match("clusterrolebinding", kind, flags=re.IGNORECASE):
+ return self.rolebinding
+
+ return None
+
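+    # Illustrative: get_object_by_kind('dc'), ('deploymentconfig') and their
+    # case variants all return self.deploymentconfig; unrecognized kinds
+    # return None.
+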
+ def get(self):
+ ''' return the self.router_parts '''
+ self.service = None
+ self.deploymentconfig = None
+ self.serviceaccount = None
+ self.secret = None
+ self.rolebinding = None
+ for part in self.router_parts:
+ result = self._get(part['kind'], rname=part['name'])
+ if result['returncode'] == 0 and part['kind'] == 'dc':
+ self.deploymentconfig = DeploymentConfig(result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'svc':
+ self.service = Service(content=result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'sa':
+ self.serviceaccount = ServiceAccount(content=result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'secret':
+ self.secret = Secret(content=result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'clusterrolebinding':
+ self.rolebinding = RoleBinding(content=result['results'][0])
+
+ return {'deploymentconfig': self.deploymentconfig,
+ 'service': self.service,
+ 'serviceaccount': self.serviceaccount,
+ 'secret': self.secret,
+ 'clusterrolebinding': self.rolebinding,
+ }
+
+ def exists(self):
+        '''return whether the dc, svc, secret, and serviceaccount all exist '''
+ if self.deploymentconfig and self.service and self.secret and self.serviceaccount:
+ return True
+
+ return False
+
+ def delete(self):
+        '''delete the router parts '''
+ parts = []
+ for part in self.router_parts:
+ parts.append(self._delete(part['kind'], part['name']))
+
+ rval = 0
+ for part in parts:
+            if part['returncode'] != 0 and 'already exist' not in part['stderr']:
+ rval = part['returncode']
+
+ return {'returncode': rval, 'results': parts}
+
+ def add_modifications(self, deploymentconfig):
+ '''modify the deployment config'''
+ # We want modifications in the form of edits coming in from the module.
+ # Let's apply these here
+ edit_results = []
+ for edit in self.config.config_options['edits'].get('value', []):
+ if edit['action'] == 'put':
+ edit_results.append(deploymentconfig.put(edit['key'],
+ edit['value']))
+ if edit['action'] == 'update':
+ edit_results.append(deploymentconfig.update(edit['key'],
+ edit['value'],
+ edit.get('index', None),
+ edit.get('curr_value', None)))
+ if edit['action'] == 'append':
+ edit_results.append(deploymentconfig.append(edit['key'],
+ edit['value']))
+
+ if edit_results and not any([res[0] for res in edit_results]):
+ return None
+
+ return deploymentconfig
+
+ # pylint: disable=too-many-branches
+ def _prepare_router(self):
+ '''prepare router for instantiation'''
+ # if cacert, key, and cert were passed, combine them into a pem file
+ if (self.config.config_options['cacert_file']['value'] and
+ self.config.config_options['cert_file']['value'] and
+ self.config.config_options['key_file']['value']):
+
+ router_pem = '/tmp/router.pem'
+ with open(router_pem, 'w') as rfd:
+ rfd.write(open(self.config.config_options['cert_file']['value']).read())
+ rfd.write(open(self.config.config_options['key_file']['value']).read())
+ if self.config.config_options['cacert_file']['value'] and \
+ os.path.exists(self.config.config_options['cacert_file']['value']):
+ rfd.write(open(self.config.config_options['cacert_file']['value']).read())
+
+ atexit.register(Utils.cleanup, [router_pem])
+
+ self.config.config_options['default_cert']['value'] = router_pem
+
+ elif self.config.config_options['default_cert']['value'] is None:
+ # No certificate was passed to us. do not pass one to oc adm router
+ self.config.config_options['default_cert']['include'] = False
+
+ options = self.config.to_option_list()
+
+ cmd = ['router', self.config.name, '-n', self.config.namespace]
+ cmd.extend(options)
+ cmd.extend(['--dry-run=True', '-o', 'json'])
+
+ results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
+
+ # pylint: disable=maybe-no-member
+ if results['returncode'] != 0 or 'items' not in results['results']:
+ return results
+
+ oc_objects = {'DeploymentConfig': {'obj': None, 'path': None, 'update': False},
+ 'Secret': {'obj': None, 'path': None, 'update': False},
+ 'ServiceAccount': {'obj': None, 'path': None, 'update': False},
+ 'ClusterRoleBinding': {'obj': None, 'path': None, 'update': False},
+ 'Service': {'obj': None, 'path': None, 'update': False},
+ }
+ # pylint: disable=invalid-sequence-index
+ for res in results['results']['items']:
+ if res['kind'] == 'DeploymentConfig':
+ oc_objects['DeploymentConfig']['obj'] = DeploymentConfig(res)
+ elif res['kind'] == 'Service':
+ oc_objects['Service']['obj'] = Service(res)
+ elif res['kind'] == 'ServiceAccount':
+ oc_objects['ServiceAccount']['obj'] = ServiceAccount(res)
+ elif res['kind'] == 'Secret':
+ oc_objects['Secret']['obj'] = Secret(res)
+ elif res['kind'] == 'ClusterRoleBinding':
+ oc_objects['ClusterRoleBinding']['obj'] = RoleBinding(res)
+
+ # Currently only deploymentconfig needs updating
+ # Verify we got a deploymentconfig
+ if not oc_objects['DeploymentConfig']['obj']:
+ return results
+
+        # apply any user-supplied modifications (edits)
+ oc_objects['DeploymentConfig']['obj'] = self.add_modifications(oc_objects['DeploymentConfig']['obj'])
+
+ for oc_type, oc_data in oc_objects.items():
+ if oc_data['obj'] is not None:
+ oc_data['path'] = Utils.create_tmp_file_from_contents(oc_type, oc_data['obj'].yaml_dict)
+
+ return oc_objects
+
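+    # Note on the flow above: _prepare_router runs `oc adm router ...
+    # --dry-run=True -o json` and splits the returned items into per-kind
+    # temp files, so create()/update() below can `oc create`/`oc replace`
+    # each part individually.
+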
+ def create(self):
+ '''Create a router
+
+ This includes the different parts:
+ - deploymentconfig
+ - service
+ - serviceaccount
+ - secrets
+ - clusterrolebinding
+ '''
+ results = []
+ self.needs_update()
+
+ import time
+ # pylint: disable=maybe-no-member
+ for kind, oc_data in self.prepared_router.items():
+ if oc_data['obj'] is not None:
+ time.sleep(1)
+ if self.get_object_by_kind(kind) is None:
+ results.append(self._create(oc_data['path']))
+
+ elif oc_data['update']:
+ results.append(self._replace(oc_data['path']))
+
+
+ rval = 0
+ for result in results:
+            if result['returncode'] != 0 and 'already exist' not in result['stderr']:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ def update(self):
+ '''run update for the router. This performs a replace'''
+ results = []
+
+ # pylint: disable=maybe-no-member
+ for _, oc_data in self.prepared_router.items():
+ if oc_data['update']:
+ results.append(self._replace(oc_data['path']))
+
+ rval = 0
+ for result in results:
+ if result['returncode'] != 0:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ def needs_update(self):
+ ''' check to see if we need to update '''
+ # ServiceAccount:
+ # Need to determine changes from the pregenerated ones from the original
+ # Since these are auto generated, we can skip
+ skip = ['secrets', 'imagePullSecrets']
+ if self.serviceaccount is None or \
+ not Utils.check_def_equal(self.prepared_router['ServiceAccount']['obj'].yaml_dict,
+ self.serviceaccount.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['ServiceAccount']['update'] = True
+
+ # Secret:
+ # See if one was generated from our dry-run and verify it if needed
+ if self.prepared_router['Secret']['obj']:
+ if not self.secret:
+ self.prepared_router['Secret']['update'] = True
+
+ if self.secret is None or \
+ not Utils.check_def_equal(self.prepared_router['Secret']['obj'].yaml_dict,
+ self.secret.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['Secret']['update'] = True
+
+ # Service:
+ # Fix the ports to have protocol=TCP
+        for port in self.prepared_router['Service']['obj'].get('spec.ports') or []:
+ port['protocol'] = 'TCP'
+
+ skip = ['portalIP', 'clusterIP', 'sessionAffinity', 'type']
+ if self.service is None or \
+ not Utils.check_def_equal(self.prepared_router['Service']['obj'].yaml_dict,
+ self.service.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['Service']['update'] = True
+
+ # DeploymentConfig:
+ # Router needs some exceptions.
+ # We do not want to check the autogenerated password for stats admin
+ if self.deploymentconfig is not None:
+ if not self.config.config_options['stats_password']['value']:
+ for idx, env_var in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
+ 'spec.template.spec.containers[0].env') or []):
+ if env_var['name'] == 'STATS_PASSWORD':
+ env_var['value'] = \
+ self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx)
+ break
+
+ # dry-run doesn't add the protocol to the ports section. We will manually do that.
+ for idx, port in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
+ 'spec.template.spec.containers[0].ports') or []):
+                    if 'protocol' not in port:
+ port['protocol'] = 'TCP'
+
+ # These are different when generating
+ skip = ['dnsPolicy',
+ 'terminationGracePeriodSeconds',
+ 'restartPolicy', 'timeoutSeconds',
+ 'livenessProbe', 'readinessProbe',
+ 'terminationMessagePath', 'hostPort',
+ 'defaultMode',
+ ]
+
+ if self.deploymentconfig is None or \
+ not Utils.check_def_equal(self.prepared_router['DeploymentConfig']['obj'].yaml_dict,
+ self.deploymentconfig.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['DeploymentConfig']['update'] = True
+
+ # Check if any of the parts need updating, if so, return True
+ # else, no need to update
+ # pylint: disable=no-member
+ return any([self.prepared_router[oc_type]['update'] for oc_type in self.prepared_router.keys()])
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run ansible idempotent code'''
+
+ rconfig = RouterConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ {'default_cert': {'value': params['default_cert'], 'include': True},
+ 'cert_file': {'value': params['cert_file'], 'include': False},
+ 'key_file': {'value': params['key_file'], 'include': False},
+ 'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'router_type': {'value': params['router_type'], 'include': False},
+ 'host_network': {'value': params['host_network'], 'include': True},
+ 'external_host': {'value': params['external_host'], 'include': True},
+ 'external_host_vserver': {'value': params['external_host_vserver'],
+ 'include': True},
+ 'external_host_insecure': {'value': params['external_host_insecure'],
+ 'include': True},
+ 'external_host_partition_path': {'value': params['external_host_partition_path'],
+ 'include': True},
+ 'external_host_username': {'value': params['external_host_username'],
+ 'include': True},
+ 'external_host_password': {'value': params['external_host_password'],
+ 'include': True},
+ 'external_host_private_key': {'value': params['external_host_private_key'],
+ 'include': True},
+ 'expose_metrics': {'value': params['expose_metrics'], 'include': True},
+ 'metrics_image': {'value': params['metrics_image'], 'include': True},
+ 'stats_user': {'value': params['stats_user'], 'include': True},
+ 'stats_password': {'value': params['stats_password'], 'include': True},
+ 'stats_port': {'value': params['stats_port'], 'include': True},
+ # extra
+ 'cacert_file': {'value': params['cacert_file'], 'include': False},
+ # edits
+ 'edits': {'value': params['edits'], 'include': False},
+ })
+
+
+ state = params['state']
+
+ ocrouter = Router(rconfig, verbose=params['debug'])
+
+ api_rval = ocrouter.get()
+
+ ########
+ # get
+ ########
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not ocrouter.exists():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ # In case of delete we return a list of each object
+ # that represents a router and its result in a list
+ # pylint: disable=redefined-variable-type
+ api_rval = ocrouter.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not ocrouter.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ api_rval = ocrouter.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if not ocrouter.needs_update():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocrouter.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+# -*- -*- -*- End included fragment: class/oc_adm_router.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_adm_router.py -*- -*- -*-
+
+
+def main():
+ '''
+ ansible oc module for router
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default='router', type='str'),
+
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ default_cert=dict(default=None, type='str'),
+ cert_file=dict(default=None, type='str'),
+ key_file=dict(default=None, type='str'),
+ images=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
+ latest_images=dict(default=False, type='bool'),
+ labels=dict(default=None, type='list'),
+ ports=dict(default=['80:80', '443:443'], type='list'),
+ replicas=dict(default=1, type='int'),
+ selector=dict(default=None, type='str'),
+ service_account=dict(default='router', type='str'),
+ router_type=dict(default='haproxy-router', type='str'),
+ host_network=dict(default=True, type='bool'),
+ # external host options
+ external_host=dict(default=None, type='str'),
+ external_host_vserver=dict(default=None, type='str'),
+ external_host_insecure=dict(default=False, type='bool'),
+ external_host_partition_path=dict(default=None, type='str'),
+ external_host_username=dict(default=None, type='str'),
+ external_host_password=dict(default=None, type='str'),
+ external_host_private_key=dict(default=None, type='str'),
+ # Metrics
+ expose_metrics=dict(default=False, type='bool'),
+ metrics_image=dict(default=None, type='str'),
+ # Stats
+ stats_user=dict(default=None, type='str'),
+ stats_password=dict(default=None, type='str'),
+ stats_port=dict(default=1936, type='int'),
+ # extra
+ cacert_file=dict(default=None, type='str'),
+ # edits
+ edits=dict(default=[], type='list'),
+ ),
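+ # default_cert is mutually exclusive with the individual cert_file,
+ # key_file and cacert_file options, which must be supplied together.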
+ mutually_exclusive=[["router_type", "images"],
+ ["key_file", "default_cert"],
+ ["cert_file", "default_cert"],
+ ["cacert_file", "default_cert"],
+ ],
+
+ required_together=[['cacert_file', 'cert_file', 'key_file']],
+ supports_check_mode=True,
+ )
+ results = Router.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_adm_router.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py
new file mode 100644
index 000000000..d2620b4cc
--- /dev/null
+++ b/roles/lib_openshift/library/oc_atomic_container.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: doc/atomic_container -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_atomic_container
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform
+ - Allows executing commands on the container images
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ name:
+ description:
+ - Name of the container
+ required: True
+ default: null
+ image:
+ description:
+ - The image to use to install the container
+ required: True
+ default: null
+ state:
+ description:
+ - State of the container
+ required: False
+ choices: ["present", "absent", "latest", "rollback"]
+ default: "latest"
+ values:
+ description:
+ - Values for the installation of the container
+ required: False
+ default: null
+'''
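+
+EXAMPLES = '''
+# Illustrative example; the image name and --set values are placeholders.
+- oc_atomic_container:
+    name: etcd
+    image: registry.example.com/rhel7/etcd
+    state: latest
+    values:
+    - ETCD_DATA_DIR=/var/lib/etcd
+'''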
+
+# -*- -*- -*- End included fragment: doc/atomic_container -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_atomic_container.py -*- -*- -*-
+
+# pylint: disable=wrong-import-position,too-many-branches,invalid-name
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _install(module, container, image, values_list):
+ ''' install a container using atomic CLI. values_list is the list of --set arguments.
+ container is the name given to the container. image is the image to use for the installation. '''
+ args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ return rc, out, err, False
+ else:
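+ # "Extracting" is printed only when a new image is actually unpacked; use it as the changed heuristic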
+ changed = "Extracting" in out
+ return rc, out, err, changed
+
+def _uninstall(module, name):
+ ''' uninstall an atomic container by its name. '''
+ args = ['atomic', 'uninstall', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ return rc, out, err, False
+
+
+def do_install(module, container, image, values_list):
+ ''' install a container and exit the module. '''
+ rc, out, err, changed = _install(module, container, image, values_list)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_uninstall(module, name):
+ ''' uninstall a container and exit the module. '''
+ rc, out, err, changed = _uninstall(module, name)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_update(module, container, old_image, image, values_list):
+ ''' update a container and exit the module. If the container uses a different
+ image than the currently installed one, uninstall the old one first '''
+
+ # the image we want is different from the installed one
+ if old_image != image:
+ rc, out, err, _ = _uninstall(module, container)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return do_install(module, container, image, values_list)
+
+ # if the image didn't change, use "atomic containers update"
+ args = ['atomic', 'containers', 'update'] + values_list + [container]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_rollback(module, name):
+ ''' move to the previous deployment of the container, if present, and exit the module. '''
+ args = ['atomic', 'containers', 'rollback', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Rolling back" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def core(module):
+ ''' entrypoint for the module. '''
+ name = module.params['name']
+ image = module.params['image']
+ values = module.params['values']
+ state = module.params['state']
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ out = {}
+ err = {}
+ rc = 0
+
+ values_list = ["--set=%s" % x for x in values] if values else []
+
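+ # Ask atomic for any installed container with this name; the --json output is parsed below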
+ args = ['atomic', 'containers', 'list', '--json', '--all', '-f', 'container=%s' % name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return
+
+ containers = json.loads(out)
+ present = len(containers) > 0
+ old_image = containers[0]["image_name"] if present else None
+
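+ # 'present' is satisfied by any installed container; 'latest' additionally updates it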
+ if state == 'present' and present:
+ module.exit_json(msg=out, changed=False)
+ elif (state in ['latest', 'present']) and not present:
+ do_install(module, name, image, values_list)
+ elif state == 'latest':
+ do_update(module, name, old_image, image, values_list)
+ elif state == 'absent':
+ if not present:
+ module.exit_json(msg="", changed=False)
+ else:
+ do_uninstall(module, name)
+ elif state == 'rollback':
+ do_rollback(module, name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, required=True),
+ image=dict(default=None, required=True),
+ state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
+ values=dict(type='list', default=[]),
+ ),
+ )
+
+ # Verify that the platform supports atomic command
+ rc, _, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e: # pylint: disable=broad-except
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_atomic_container.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index a565b32f2..aec64202f 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -164,6 +169,7 @@ oc_edit:
# -*- -*- -*- End included fragment: doc/edit -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -358,11 +364,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -402,10 +414,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -434,14 +460,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -509,7 +537,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -522,7 +552,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -531,7 +562,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -570,12 +602,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -588,11 +628,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -748,6 +797,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -761,6 +836,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -957,24 +1033,24 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
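+ # communicate() returns bytes on Python 3; decode so callers always get text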
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -982,7 +1057,11 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
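+ # report a missing or unexecutable binary as a failed result rather than a traceback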
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -1034,7 +1112,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1116,7 +1200,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1222,8 +1311,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1243,8 +1332,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index e00f5cdcc..e164ecf95 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -131,6 +136,7 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/env -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -325,11 +331,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -369,10 +381,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -401,14 +427,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -476,7 +504,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -489,7 +519,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -498,7 +529,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -537,12 +569,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -555,11 +595,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -715,6 +764,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -728,6 +803,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -924,24 +1000,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -949,7 +1024,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -1001,7 +1079,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1083,7 +1167,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1189,8 +1278,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1210,8 +1299,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1272,7 +1361,7 @@ class OpenShiftCLIConfig(object):
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
- ''' Class to wrap the oc command line tools '''
+ ''' Class to model an OpenShift DeploymentConfig '''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
@@ -1336,7 +1425,6 @@ spec:
super(DeploymentConfig, self).__init__(content=content)
- # pylint: disable=no-member
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
@@ -1374,6 +1462,18 @@ spec:
return False
+ def get_env_var(self, key):
+ '''return an environment variable by name, or None if absent'''
+ results = self.get(DeploymentConfig.env_path) or []
+ if not results:
+ return None
+
+ for env_var in results:
+ if env_var['name'] == key:
+ return env_var
+
+ return None
+
def get_env_vars(self):
'''return a environment variables '''
return self.get(DeploymentConfig.env_path) or []
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index e168614bd..fd6674d41 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -140,6 +145,7 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/label -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -334,11 +340,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -378,10 +390,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -410,14 +436,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -485,7 +513,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -498,7 +528,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -507,7 +538,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -546,12 +578,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -564,11 +604,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -724,6 +773,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -737,6 +812,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -933,24 +1009,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -958,7 +1033,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -1010,7 +1088,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1092,7 +1176,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1198,8 +1287,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1219,8 +1308,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index d73d05472..24397d725 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -143,6 +148,7 @@ register: router_output
# -*- -*- -*- End included fragment: doc/obj -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -337,11 +343,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -381,10 +393,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -413,14 +439,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -488,7 +516,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -501,7 +531,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -510,7 +541,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -549,12 +581,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -567,11 +607,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -727,6 +776,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -740,6 +815,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -936,24 +1012,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -961,7 +1036,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -1013,7 +1091,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1095,7 +1179,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1201,8 +1290,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1222,8 +1311,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1357,7 +1446,6 @@ class OCObject(OpenShiftCLI):
if objects['returncode'] != 0:
return objects
- # pylint: disable=no-member
data = None
if files:
data = Utils.get_resource_file(files[0], content_type)
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index bcb4d2289..0a4f2058a 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -132,6 +137,7 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/process -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -326,11 +332,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -370,10 +382,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -402,14 +428,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -477,7 +505,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -490,7 +520,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -499,7 +530,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -538,12 +570,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -556,11 +596,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -716,6 +765,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -729,6 +804,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -925,24 +1001,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -950,7 +1025,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
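+            # e.g. a missing or non-executable binary is reported as an
+            # ordinary command failure instead of crashing the module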
rval = {"returncode": returncode,
"results": results,
@@ -1002,7 +1080,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1084,7 +1168,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1190,8 +1279,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1211,8 +1300,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index d5dc84116..21e7e175b 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -174,6 +179,7 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/route -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -368,11 +374,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -412,10 +424,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -444,14 +470,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -519,7 +547,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -532,7 +562,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -541,7 +572,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -580,12 +612,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -598,11 +638,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -758,6 +807,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -771,6 +846,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -967,24 +1043,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -992,7 +1067,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -1044,7 +1122,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1126,7 +1210,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1232,8 +1321,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1253,8 +1342,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1505,13 +1594,15 @@ class OCRoute(OpenShiftCLI):
def update(self):
'''update the object'''
- # need to update the tls information and the service name
- return self._replace_content(self.kind, self.config.name, self.config.data)
+ return self._replace_content(self.kind,
+ self.config.name,
+ self.config.data,
+ force=(self.config.host != self.route.get_host()))
def needs_update(self):
''' verify an update is needed '''
skip = []
- return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=True)
+ return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=self.verbose)
@staticmethod
def get_cert_data(path, content):
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index be3b7f837..0c2d86a18 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -118,6 +123,7 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/scale -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -312,11 +318,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -356,10 +368,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -388,14 +414,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -463,7 +491,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -476,7 +506,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -485,7 +516,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -524,12 +556,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -542,11 +582,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -702,6 +751,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -715,6 +790,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -911,24 +987,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -936,7 +1011,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -988,7 +1066,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1070,7 +1154,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1176,8 +1265,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1197,8 +1286,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1259,7 +1348,7 @@ class OpenShiftCLIConfig(object):
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
- ''' Class to wrap the oc command line tools '''
+ ''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
@@ -1323,7 +1412,6 @@ spec:
super(DeploymentConfig, self).__init__(content=content)
- # pylint: disable=no-member
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
@@ -1361,6 +1449,18 @@ spec:
return False
+ def get_env_var(self, key):
+        '''return an environment variable by name'''
+ results = self.get(DeploymentConfig.env_path) or []
+ if not results:
+ return None
+
+ for env_var in results:
+ if env_var['name'] == key:
+ return env_var
+
+ return None
+
def get_env_vars(self):
+        '''return the list of environment variables'''
return self.get(DeploymentConfig.env_path) or []
@@ -1600,7 +1700,12 @@ spec:
# pylint: disable=too-many-public-methods
class ReplicationController(DeploymentConfig):
- ''' Class to wrap the oc command line tools '''
+    ''' Class to model an openshift ReplicationController object.
+
+        Currently this is modeled on DeploymentConfig, since the two
+        objects are very similar. Functionality specific to replication
+        controllers will be added as the need arises.
+ '''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
diff --git a/roles/lib_openshift/library/oc_sdnvalidator.py b/roles/lib_openshift/library/oc_sdnvalidator.py
new file mode 100644
index 000000000..795b775f3
--- /dev/null
+++ b/roles/lib_openshift/library/oc_sdnvalidator.py
@@ -0,0 +1,1387 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/sdnvalidator -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_sdnvalidator
+short_description: Validate SDN objects
+description:
+ - Validate SDN objects
+options:
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+author:
+- "Mo Khan <monis@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: validate the cluster SDN objects
+  oc_sdnvalidator:
+  register: oc_sdnvalidator
+'''
+
+# -*- -*- -*- End included fragment: doc/sdnvalidator -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+    @property
+    def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
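+        # e.g. parse_key('a.b[0].c') -> [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]
+        # (each tuple from re.findall is (array_index, dict_key))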
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a#b
+            item = 'd'
+            result = {'a': {'b': 'd'}}
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
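+        # write to a sibling temp file and rename over the target so readers
+        # never observe a partially written file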
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
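+        # e.g. parse_value('yes', vtype='bool') typically yields True, since
+        # YAML 1.1 resolvers treat 'yes' as a boolean, while
+        # parse_value('yes', vtype='str') returns the string unchanged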
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+                    'msg': ('Error opening file [%s]. Verify that the '
+                            'file exists, that it has correct permissions, '
+                            'and is valid yaml.') % module.params['src']}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node --schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+                    except ValueError as verr:
+                        # bind a fresh name here: Python 3 implicitly deletes
+                        # the exception variable when the handler exits, which
+                        # would otherwise wipe out the enclosing 'err'
+                        if "No JSON object could be decoded" in verr.args:
+                            err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
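+        # e.g. stdout such as:
+        #   oc v3.3.0.33
+        #   kubernetes v1.3.0+52492b4
+        # yields {'oc': 'v3.3.0.33', 'kubernetes': 'v1.3.0+52492b4',
+        #         'openshift': 'v3.3.0.33'} via the 3.2 fallback below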
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+        # Horrible hack to get the openshift version in OpenShift 3.2:
+        # by default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
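+        # e.g. {'oc': 'v3.3.0.33'} -> {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}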
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
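+# Illustrative sketch (hypothetical objects): autogenerated keys are skipped,
+# so a live object only needs to match on the user-supplied fields.
+#
+# user = {'spec': {'replicas': 1}}
+# live = {'spec': {'replicas': 1}, 'metadata': {'uid': 'abc'}, 'status': {}}
+# Utils.check_def_equal(user, live) # -> True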
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a list of strings'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as a list of cli params '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
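+# Illustrative sketch (hypothetical options): stringify() keeps only entries
+# marked include whose value is non-empty or an int, e.g.
+# opts = {'replicas': {'value': 2, 'include': True},
+#         'service_account': {'value': '', 'include': True}}
+# OpenShiftCLIConfig('rname', 'default', '/tmp/kc', opts).stringify()
+# -> ['--replicas=2']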
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_sdnvalidator.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class OCSDNValidator(OpenShiftCLI):
+ ''' Class to validate SDN objects via the oc command line tools '''
+
+ def __init__(self, kubeconfig):
+ ''' Constructor for OCSDNValidator '''
+ # namespace has no meaning for SDN validation, hardcode to 'default'
+ super(OCSDNValidator, self).__init__('default', kubeconfig)
+
+ def get(self, kind, invalid_filter):
+ ''' return SDN information '''
+
+ rval = self._get(kind)
+ if rval['returncode'] != 0:
+ return False, rval, []
+
+ return True, rval, list(filter(invalid_filter, rval['results'][0]['items']))
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params):
+ ''' run the idempotent ansible code
+
+ params comes from the ansible portion of this module
+ '''
+
+ sdnvalidator = OCSDNValidator(params['kubeconfig'])
+ all_invalid = {}
+ failed = False
+
+ checks = (
+ (
+ 'hostsubnet',
+ lambda x: x['metadata']['name'] != x['host'],
+ u'hostsubnets where metadata.name != host',
+ ),
+ (
+ 'netnamespace',
+ lambda x: x['metadata']['name'] != x['netname'],
+ u'netnamespaces where metadata.name != netname',
+ ),
+ )
+
+ for resource, invalid_filter, invalid_msg in checks:
+ success, rval, invalid = sdnvalidator.get(resource, invalid_filter)
+ if not success:
+ return {'failed': True, 'msg': 'Failed to GET {}.'.format(resource), 'state': 'list', 'results': rval}
+ if invalid:
+ failed = True
+ all_invalid[invalid_msg] = invalid
+
+ if failed:
+ return {'failed': True, 'msg': 'One or more SDN objects are not valid.', 'state': 'list', 'results': all_invalid}
+
+ return {'msg': 'All SDN objects are valid.'}
+
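+# Illustrative data shape (assumed): a hostsubnet flagged by the first check
+# would look like {'metadata': {'name': 'node-1'}, 'host': 'node-2', ...},
+# since metadata.name != host.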
+# -*- -*- -*- End included fragment: class/oc_sdnvalidator.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_sdnvalidator.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for validating OpenShift SDN objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ ),
+ supports_check_mode=False,
+ )
+
+ rval = OCSDNValidator.run_ansible(module.params)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_sdnvalidator.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 8598cb0ec..b6dcd9fff 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -164,6 +169,7 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -358,11 +364,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
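+ # (PyYAML has no RoundTripDumper attribute, so the AttributeError branch
+ # above is the PyYAML path; round-trip formatting is not preserved there.)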
return (True, self.yaml_dict)
@@ -402,10 +414,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -434,14 +460,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -509,7 +537,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -522,7 +552,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -531,7 +562,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -570,12 +602,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -588,11 +628,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -748,6 +797,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
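+# Illustrative behavior (assumed environment): shutil.which only exists on
+# Python >= 3.3, so the AttributeError branch is the Python 2 fallback.
+# locate_oc_binary() # -> '/usr/local/bin/oc' when oc lives there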
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -761,6 +836,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -957,24 +1033,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
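+ # e.g. oadm=True with cmd=['registry'] now yields
+ # [self.oc_binary, 'adm', 'registry'] rather than ['oadm', 'registry']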
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -982,7 +1057,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -1034,7 +1112,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1116,7 +1200,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1222,8 +1311,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1243,8 +1332,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1321,7 +1410,7 @@ class SecretConfig(object):
self.create_dict()
def create_dict(self):
- ''' return a secret as a dict '''
+ ''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
self.data['metadata'] = {}
@@ -1391,8 +1480,7 @@ class Secret(Yedit):
def update_secret(self, key, value):
''' update a secret'''
- # pylint: disable=no-member
- if self.secrets.has_key(key):
+ if key in self.secrets:
self.secrets[key] = value
else:
self.add_secret(key, value)
@@ -1430,7 +1518,7 @@ class OCSecret(OpenShiftCLI):
if results['returncode'] == 0 and results['results'][0]:
results['exists'] = True
if self.decode:
- if results['results'][0].has_key('data'):
+ if 'data' in results['results'][0]:
for sname, value in results['results'][0]['data'].items():
results['decoded'][sname] = base64.b64decode(value)
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index a9baef765..83d0579a5 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -170,6 +175,7 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/service -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -364,11 +370,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -408,10 +420,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -440,14 +466,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -515,7 +543,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -528,7 +558,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -537,7 +568,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -576,12 +608,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -594,11 +634,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -754,6 +803,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -767,6 +842,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -963,24 +1039,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -988,7 +1063,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -1040,7 +1118,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1122,7 +1206,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1228,8 +1317,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1249,8 +1338,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1374,6 +1463,7 @@ class Service(Yedit):
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
+ selector_path = 'spec.selector'
kind = 'Service'
def __init__(self, content):
@@ -1384,6 +1474,10 @@ class Service(Yedit):
''' get a list of ports '''
return self.get(Service.port_path) or []
+ def get_selector(self):
+ ''' get the service selector'''
+ return self.get(Service.selector_path) or {}
+
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
@@ -1457,7 +1551,7 @@ class OCService(OpenShiftCLI):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCVolume '''
- super(OCService, self).__init__(namespace, kubeconfig)
+ super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
cluster_ip, portal_ip, session_affinity, service_type)
@@ -1528,7 +1622,9 @@ class OCService(OpenShiftCLI):
params['portalip'],
params['ports'],
params['session_affinity'],
- params['service_type'])
+ params['service_type'],
+ params['kubeconfig'],
+ params['debug'])
state = params['state']
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index fcc5bbfa7..9b0a6e060 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -116,6 +121,7 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/serviceaccount -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -310,11 +316,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -354,10 +366,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -386,14 +412,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -461,7 +489,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -474,7 +504,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -483,7 +514,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -522,12 +554,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -540,11 +580,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -700,6 +749,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -713,6 +788,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -909,24 +985,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -934,7 +1009,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -986,7 +1064,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1068,7 +1152,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1174,8 +1263,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1195,8 +1284,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1271,7 +1360,7 @@ class ServiceAccountConfig(object):
self.create_dict()
def create_dict(self):
- ''' return a properly structured volume '''
+ ''' instantiate a properly structured ServiceAccount dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'ServiceAccount'
self.data['metadata'] = {}
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index ef10162c2..413b8e358 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -116,6 +121,7 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/serviceaccount_secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -310,11 +316,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -354,10 +366,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -386,14 +412,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -461,7 +489,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -474,7 +504,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -483,7 +514,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -522,12 +554,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -540,11 +580,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -700,6 +749,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -713,6 +788,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -909,24 +985,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -934,7 +1009,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -986,7 +1064,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1068,7 +1152,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1174,8 +1263,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1195,8 +1284,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
@@ -1271,7 +1360,7 @@ class ServiceAccountConfig(object):
self.create_dict()
def create_dict(self):
- ''' return a properly structured volume '''
+ ''' instantiate a properly structured ServiceAccount dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'ServiceAccount'
self.data['metadata'] = {}
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index e44375ffa..1aae7a8ea 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -33,6 +33,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -40,7 +41,11 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -88,6 +93,7 @@ oc_version:
# -*- -*- -*- End included fragment: doc/version -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -282,11 +288,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -326,10 +338,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -358,14 +384,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -433,7 +461,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -446,7 +476,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -455,7 +486,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -494,12 +526,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -512,11 +552,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
@@ -672,6 +721,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -685,6 +760,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -881,24 +957,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -906,7 +981,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -958,7 +1036,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -1040,7 +1124,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -1146,8 +1235,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -1167,8 +1256,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
diff --git a/roles/lib_openshift/meta/main.yml b/roles/lib_openshift/meta/main.yml
new file mode 100644
index 000000000..7c72daa63
--- /dev/null
+++ b/roles/lib_openshift/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Team
+ description: OpenShift Repositories
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- { role: openshift_repos }
diff --git a/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py
new file mode 100644
index 000000000..c80c2eb44
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py
@@ -0,0 +1,35 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc adm module for ca create-server-cert
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str', choices=['present']),
+ debug=dict(default=False, type='bool'),
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ backup=dict(default=True, type='bool'),
+ force=dict(default=False, type='bool'),
+ # oc adm ca create-server-cert [options]
+ cert=dict(default=None, type='str'),
+ key=dict(default=None, type='str'),
+ signer_cert=dict(default='/etc/origin/master/ca.crt', type='str'),
+ signer_key=dict(default='/etc/origin/master/ca.key', type='str'),
+ signer_serial=dict(default='/etc/origin/master/ca.serial.txt', type='str'),
+ hostnames=dict(default=[], type='list'),
+ ),
+ supports_check_mode=True,
+ )
+
+ results = CAServerCert.run_ansible(module.params, module.check_mode)
+ if 'failed' in results:
+ return module.fail_json(**results)
+
+ return module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
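+
+# Illustrative playbook usage (hypothetical values):
+#
+#   - oc_adm_ca_server_cert:
+#       cert: /etc/origin/master/named_certificates/custom.crt
+#       key: /etc/origin/master/named_certificates/custom.key
+#       hostnames:
+#       - api.example.com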
diff --git a/roles/lib_openshift/src/ansible/oc_adm_registry.py b/roles/lib_openshift/src/ansible/oc_adm_registry.py
new file mode 100644
index 000000000..c85973c7d
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_adm_registry.py
@@ -0,0 +1,47 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for registry
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, required=True, type='str'),
+
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ images=dict(default=None, type='str'),
+ latest_images=dict(default=False, type='bool'),
+ labels=dict(default=None, type='list'),
+ ports=dict(default=['5000'], type='list'),
+ replicas=dict(default=1, type='int'),
+ selector=dict(default=None, type='str'),
+ service_account=dict(default='registry', type='str'),
+ mount_host=dict(default=None, type='str'),
+ volume_mounts=dict(default=None, type='list'),
+ env_vars=dict(default={}, type='dict'),
+ edits=dict(default=[], type='list'),
+ enforce_quota=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool'),
+ daemonset=dict(default=False, type='bool'),
+ tls_key=dict(default=None, type='str'),
+ tls_certificate=dict(default=None, type='str'),
+ ),
+
+ supports_check_mode=True,
+ )
+
+ results = Registry.run_ansible(module.params, module.check_mode)
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_adm_router.py b/roles/lib_openshift/src/ansible/oc_adm_router.py
new file mode 100644
index 000000000..b6f8e90d0
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_adm_router.py
@@ -0,0 +1,69 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def main():
+ '''
+ ansible oc module for router
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default='router', type='str'),
+
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ default_cert=dict(default=None, type='str'),
+ cert_file=dict(default=None, type='str'),
+ key_file=dict(default=None, type='str'),
+ images=dict(default=None, type='str'),  # 'openshift3/ose-${component}:${version}'
+ latest_images=dict(default=False, type='bool'),
+ labels=dict(default=None, type='list'),
+ ports=dict(default=['80:80', '443:443'], type='list'),
+ replicas=dict(default=1, type='int'),
+ selector=dict(default=None, type='str'),
+ service_account=dict(default='router', type='str'),
+ router_type=dict(default='haproxy-router', type='str'),
+ host_network=dict(default=True, type='bool'),
+ # external host options
+ external_host=dict(default=None, type='str'),
+ external_host_vserver=dict(default=None, type='str'),
+ external_host_insecure=dict(default=False, type='bool'),
+ external_host_partition_path=dict(default=None, type='str'),
+ external_host_username=dict(default=None, type='str'),
+ external_host_password=dict(default=None, type='str'),
+ external_host_private_key=dict(default=None, type='str'),
+ # Metrics
+ expose_metrics=dict(default=False, type='bool'),
+ metrics_image=dict(default=None, type='str'),
+ # Stats
+ stats_user=dict(default=None, type='str'),
+ stats_password=dict(default=None, type='str'),
+ stats_port=dict(default=1936, type='int'),
+ # extra
+ cacert_file=dict(default=None, type='str'),
+ # edits
+ edits=dict(default=[], type='list'),
+ ),
+ mutually_exclusive=[["router_type", "images"],
+ ["key_file", "default_cert"],
+ ["cert_file", "default_cert"],
+ ["cacert_file", "default_cert"],
+ ],
+
+ required_together=[['cacert_file', 'cert_file', 'key_file']],
+ supports_check_mode=True,
+ )
+ results = Router.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
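
The mutually_exclusive and required_together lists above are enforced by
AnsibleModule before the module body runs. A standalone sketch of those
semantics, using simplified rules rather than Ansible's actual implementation:

    def validate(params, mutually_exclusive, required_together):
        '''mimic AnsibleModule's pairwise parameter checks.'''
        for group in mutually_exclusive:
            present = [key for key in group if params.get(key) is not None]
            if len(present) > 1:
                raise ValueError('parameters are mutually exclusive: %s' % present)
        for group in required_together:
            present = [key for key in group if params.get(key) is not None]
            if present and len(present) != len(group):
                raise ValueError('parameters are required together: %s' % list(group))

    # supplying only cert_file and key_file without cacert_file would raise
    validate({'cert_file': 'tls.crt', 'key_file': 'tls.key', 'cacert_file': 'ca.crt'},
             mutually_exclusive=[['key_file', 'default_cert']],
             required_together=[['cacert_file', 'cert_file', 'key_file']])
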
diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py
new file mode 100644
index 000000000..20d75cb63
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py
@@ -0,0 +1,137 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=wrong-import-position,too-many-branches,invalid-name
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _install(module, container, image, values_list):
+ ''' install a container using atomic CLI. values_list is the list of --set arguments.
+ container is the name given to the container. image is the image to use for the installation. '''
+ args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ return rc, out, err, False
+ else:
+ changed = "Extracting" in out
+ return rc, out, err, changed
+
+def _uninstall(module, name):
+ ''' uninstall an atomic container by its name. '''
+ args = ['atomic', 'uninstall', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ return rc, out, err, False
+
+
+def do_install(module, container, image, values_list):
+ ''' install a container and exit the module. '''
+ rc, out, err, changed = _install(module, container, image, values_list)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_uninstall(module, name):
+ ''' uninstall a container and exit the module. '''
+ rc, out, err, changed = _uninstall(module, name)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_update(module, container, old_image, image, values_list):
+ ''' update a container and exit the module. If the container uses a different
+ image from the currently installed one, first uninstall the old one. '''
+
+ # the image we want is different from the installed one
+ if old_image != image:
+ rc, out, err, _ = _uninstall(module, container)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return do_install(module, container, image, values_list)
+
+ # if the image didn't change, use "atomic containers update"
+ args = ['atomic', 'containers', 'update'] + values_list + [container]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_rollback(module, name):
+ ''' move to the previous deployment of the container, if present, and exit the module. '''
+ args = ['atomic', 'containers', 'rollback', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Rolling back" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def core(module):
+ ''' entrypoint for the module. '''
+ name = module.params['name']
+ image = module.params['image']
+ values = module.params['values']
+ state = module.params['state']
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ out = {}
+ err = {}
+ rc = 0
+
+ values_list = ["--set=%s" % x for x in values] if values else []
+
+ args = ['atomic', 'containers', 'list', '--json', '--all', '-f', 'container=%s' % name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return
+
+ containers = json.loads(out)
+ present = len(containers) > 0
+ old_image = containers[0]["image_name"] if present else None
+
+ if state == 'present' and present:
+ module.exit_json(msg=out, changed=False)
+ elif (state in ['latest', 'present']) and not present:
+ do_install(module, name, image, values_list)
+ elif state == 'latest':
+ do_update(module, name, old_image, image, values_list)
+ elif state == 'absent':
+ if not present:
+ module.exit_json(msg="", changed=False)
+ else:
+ do_uninstall(module, name)
+ elif state == 'rollback':
+ do_rollback(module, name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, required=True),
+ image=dict(default=None, required=True),
+ state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
+ values=dict(type='list', default=[]),
+ ),
+ )
+
+ # Verify that the platform supports the atomic command
+ rc, _, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e: # pylint: disable=broad-except
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
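
core() keys its state decisions off the JSON emitted by atomic containers
list. A sketch of that decision logic against a hand-written sample record
(the field names mirror what the module reads; the values are invented):

    import json

    sample = '[{"container": "etcd", "image_name": "registry.example.com/rhel7/etcd"}]'

    containers = json.loads(sample)
    present = len(containers) > 0
    old_image = containers[0]['image_name'] if present else None

    # state=latest with a different target image triggers uninstall + install
    target_image = 'registry.example.com/rhel7/etcd:new'
    needs_reinstall = present and old_image != target_image
    print(present, old_image, needs_reinstall)
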
diff --git a/roles/lib_openshift/src/ansible/oc_sdnvalidator.py b/roles/lib_openshift/src/ansible/oc_sdnvalidator.py
new file mode 100644
index 000000000..e91417d63
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_sdnvalidator.py
@@ -0,0 +1,24 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for validating OpenShift SDN objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ ),
+ supports_check_mode=False,
+ )
+
+
+ rval = OCSDNValidator.run_ansible(module.params)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
new file mode 100644
index 000000000..6ed1f2f35
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
@@ -0,0 +1,135 @@
+# pylint: skip-file
+# flake8: noqa
+
+class CAServerCertConfig(OpenShiftCLIConfig):
+ ''' CAServerCertConfig is a DTO for the oc adm ca command '''
+ def __init__(self, kubeconfig, verbose, ca_options):
+ super(CAServerCertConfig, self).__init__('ca', None, kubeconfig, ca_options)
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+ self._ca = ca_options
+
+
+class CAServerCert(OpenShiftCLI):
+ ''' Class to wrap the oc adm ca create-server-cert command line'''
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for oadm ca '''
+ super(CAServerCert, self).__init__(None, config.kubeconfig, verbose)
+ self.config = config
+ self.verbose = verbose
+
+ def get(self):
+ '''get the current cert file
+
+ If a file exists by the same name in the specified location then the cert exists
+ '''
+ cert = self.config.config_options['cert']['value']
+ if cert and os.path.exists(cert):
+ return open(cert).read()
+
+ return None
+
+ def create(self):
+ '''run openshift oc adm ca create-server-cert cmd'''
+
+ # Added here as a safeguard against stomping on the
+ # cert and key files if they exist
+ if self.config.config_options['backup']['value']:
+ import time
+ ext = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
+ date_str = "%s_" + "%s" % ext
+
+ if os.path.exists(self.config.config_options['key']['value']):
+ shutil.copy(self.config.config_options['key']['value'],
+ date_str % self.config.config_options['key']['value'])
+ if os.path.exists(self.config.config_options['cert']['value']):
+ shutil.copy(self.config.config_options['cert']['value'],
+ date_str % self.config.config_options['cert']['value'])
+
+ options = self.config.to_option_list()
+
+ cmd = ['ca', 'create-server-cert']
+ cmd.extend(options)
+
+ return self.openshift_cmd(cmd, oadm=True)
+
+ def exists(self):
+ ''' check whether the certificate exists and covers the requested hostnames '''
+
+ cert_path = self.config.config_options['cert']['value']
+ if not os.path.exists(cert_path):
+ return False
+
+ # pyopenssl would be preferable here, but it is not guaranteed
+ # to be installed; until it is, shell out to openssl instead.
+ # The equivalent command to get the subject and the SAN is:
+ # openssl x509 -text -noout -certopt \
+ # no_header,no_version,no_serial,no_signame,no_validity,no_issuer,no_pubkey,no_sigdump,no_aux \
+ # -in /etc/origin/master/registry.crt
+ # The output is parsed with a regex below.
+ cert_names = []
+ hostnames = self.config.config_options['hostnames']['value'].split(',')
+ proc = subprocess.Popen(['openssl', 'x509', '-noout', '-text', '-in', cert_path],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ x509output, _ = proc.communicate()
+ if proc.returncode == 0:
+ regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
+ match = regex.search(x509output)
+ # a certificate without a SAN section yields no match
+ entries = re.split(r", *", match.group(1)) if match else []
+ for entry in entries:
+ if entry.startswith('DNS') or entry.startswith('IP Address'):
+ cert_names.append(entry.split(':')[1])
+ # now that we have cert names let's compare
+ cert_set = set(cert_names)
+ hname_set = set(hostnames)
+ if cert_set.issubset(hname_set) and hname_set.issubset(cert_set):
+ return True
+
+ return False
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ config = CAServerCertConfig(params['kubeconfig'],
+ params['debug'],
+ {'cert': {'value': params['cert'], 'include': True},
+ 'hostnames': {'value': ','.join(params['hostnames']), 'include': True},
+ 'overwrite': {'value': True, 'include': True},
+ 'key': {'value': params['key'], 'include': True},
+ 'signer_cert': {'value': params['signer_cert'], 'include': True},
+ 'signer_key': {'value': params['signer_key'], 'include': True},
+ 'signer_serial': {'value': params['signer_serial'], 'include': True},
+ 'backup': {'value': params['backup'], 'include': False},
+ })
+
+ server_cert = CAServerCert(config)
+
+ state = params['state']
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not server_cert.exists() or params['force']:
+
+ if check_mode:
+ return {'changed': True,
+ 'msg': "CHECK_MODE: Would have created the certificate.",
+ 'state': state}
+
+ api_rval = server_cert.create()
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Exists
+ ########
+ api_rval = server_cert.get()
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True,
+ 'msg': 'Unknown state passed. %s' % state}
+
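
The exists() check above shells out to openssl and scrapes the Subject
Alternative Name line. Running the same regex against a fabricated fragment of
openssl x509 -text output shows what ends up in cert_names:

    import re

    x509output = '''
        X509v3 Subject Alternative Name:
            DNS:docker-registry.default.svc.cluster.local, IP Address:172.30.12.34
    '''

    regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
    match = regex.search(x509output)
    cert_names = []
    for entry in re.split(r", *", match.group(1)):
        if entry.startswith('DNS') or entry.startswith('IP Address'):
            cert_names.append(entry.split(':')[1])
    print(cert_names)  # ['docker-registry.default.svc.cluster.local', '172.30.12.34']
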
diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py
new file mode 100644
index 000000000..37904c43f
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_adm_registry.py
@@ -0,0 +1,421 @@
+# pylint: skip-file
+# flake8: noqa
+
+class RegistryException(Exception):
+ ''' Registry Exception Class '''
+ pass
+
+
+class RegistryConfig(OpenShiftCLIConfig):
+ ''' RegistryConfig is a DTO for the registry. '''
+ def __init__(self, rname, namespace, kubeconfig, registry_options):
+ super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options)
+
+
+class Registry(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
+ volume_path = 'spec.template.spec.volumes'
+ env_path = 'spec.template.spec.containers[0].env'
+
+ def __init__(self,
+ registry_config,
+ verbose=False):
+ ''' Constructor for Registry
+
+ a registry consists of the following parts:
+ - dc/docker-registry
+ - svc/docker-registry
+
+ Parameters:
+ :registry_config:
+ :verbose:
+ '''
+ super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
+ self.version = OCVersion(registry_config.kubeconfig, verbose)
+ self.svc_ip = None
+ self.portal_ip = None
+ self.config = registry_config
+ self.verbose = verbose
+ self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
+ {'kind': 'svc', 'name': self.config.name},
+ ]
+
+ self.__prepared_registry = None
+ self.volume_mounts = []
+ self.volumes = []
+ if self.config.config_options['volume_mounts']['value']:
+ for volume in self.config.config_options['volume_mounts']['value']:
+ volume_info = {'secret_name': volume.get('secret_name', None),
+ 'name': volume.get('name', None),
+ 'type': volume.get('type', None),
+ 'path': volume.get('path', None),
+ 'claimName': volume.get('claim_name', None),
+ 'claimSize': volume.get('claim_size', None),
+ }
+
+ vol, vol_mount = Volume.create_volume_structure(volume_info)
+ self.volumes.append(vol)
+ self.volume_mounts.append(vol_mount)
+
+ self.dconfig = None
+ self.svc = None
+
+ @property
+ def deploymentconfig(self):
+ ''' deploymentconfig property '''
+ return self.dconfig
+
+ @deploymentconfig.setter
+ def deploymentconfig(self, config):
+ ''' setter for deploymentconfig property '''
+ self.dconfig = config
+
+ @property
+ def service(self):
+ ''' service property '''
+ return self.svc
+
+ @service.setter
+ def service(self, config):
+ ''' setter for service property '''
+ self.svc = config
+
+ @property
+ def prepared_registry(self):
+ ''' prepared_registry property '''
+ if not self.__prepared_registry:
+ results = self.prepare_registry()
+ if not results:
+ raise RegistryException('Could not perform registry preparation.')
+ self.__prepared_registry = results
+
+ return self.__prepared_registry
+
+ @prepared_registry.setter
+ def prepared_registry(self, data):
+ ''' setter method for prepared_registry attribute '''
+ self.__prepared_registry = data
+
+ def get(self):
+ ''' return the self.registry_parts '''
+ self.deploymentconfig = None
+ self.service = None
+
+ rval = 0
+ for part in self.registry_parts:
+ result = self._get(part['kind'], rname=part['name'])
+ if result['returncode'] == 0 and part['kind'] == 'dc':
+ self.deploymentconfig = DeploymentConfig(result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'svc':
+ self.service = Service(result['results'][0])
+
+ if result['returncode'] != 0:
+ rval = result['returncode']
+
+
+ return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service}
+
+ def exists(self):
+ '''does the object exist?'''
+ self.get()
+ if self.deploymentconfig and self.service:
+ return True
+
+ return False
+
+ def delete(self, complete=True):
+ '''delete the registry parts; the service is only removed when complete is True'''
+ parts = []
+ for part in self.registry_parts:
+ if not complete and part['kind'] == 'svc':
+ continue
+ parts.append(self._delete(part['kind'], part['name']))
+
+ # Clean up returned results
+ rval = 0
+ for part in parts:
+ # pylint: disable=invalid-sequence-index
+ if 'returncode' in part and part['returncode'] != 0:
+ rval = part['returncode']
+
+ return {'returncode': rval, 'results': parts}
+
+ def prepare_registry(self):
+ ''' prepare a registry for instantiation '''
+ options = self.config.to_option_list()
+
+ cmd = ['registry', '-n', self.config.namespace]
+ cmd.extend(options)
+ cmd.extend(['--dry-run=True', '-o', 'json'])
+
+ results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
+ # pylint thinks results is a string
+ # pylint: disable=no-member
+ if results['returncode'] != 0 or 'items' not in results['results']:
+ return results
+
+ service = None
+ deploymentconfig = None
+ # pylint: disable=invalid-sequence-index
+ for res in results['results']['items']:
+ if res['kind'] == 'DeploymentConfig':
+ deploymentconfig = DeploymentConfig(res)
+ elif res['kind'] == 'Service':
+ service = Service(res)
+
+ # Verify we got a service and a deploymentconfig
+ if not service or not deploymentconfig:
+ return results
+
+ # results will need to get parsed here and modifications added
+ deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))
+
+ # modify service ip
+ if self.svc_ip:
+ service.put('spec.clusterIP', self.svc_ip)
+ if self.portal_ip:
+ service.put('spec.portalIP', self.portal_ip)
+
+ # the dry-run does not apply the selector correctly; carry it
+ # over from the live service when one exists
+ if self.service:
+ service.put('spec.selector', self.service.get_selector())
+
+ # need to create the service and the deploymentconfig
+ service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
+ deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
+
+ return {"service": service,
+ "service_file": service_file,
+ "service_update": False,
+ "deployment": deploymentconfig,
+ "deployment_file": deployment_file,
+ "deployment_update": False}
+
+ def create(self):
+ '''Create a registry'''
+ results = []
+ self.needs_update()
+ # if the object is none, then we need to create it
+ # if the object needs an update, then we should call replace
+ # Handle the deploymentconfig
+ if self.deploymentconfig is None:
+ results.append(self._create(self.prepared_registry['deployment_file']))
+ elif self.prepared_registry['deployment_update']:
+ results.append(self._replace(self.prepared_registry['deployment_file']))
+
+ # Handle the service
+ if self.service is None:
+ results.append(self._create(self.prepared_registry['service_file']))
+ elif self.prepared_registry['service_update']:
+ results.append(self._replace(self.prepared_registry['service_file']))
+
+ # Clean up returned results
+ rval = 0
+ for result in results:
+ # pylint: disable=invalid-sequence-index
+ if 'returncode' in result and result['returncode'] != 0:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ def update(self):
+ '''run update for the registry. This performs a replace if required'''
+ # Store the current service IP
+ if self.service:
+ svcip = self.service.get('spec.clusterIP')
+ if svcip:
+ self.svc_ip = svcip
+ portip = self.service.get('spec.portalIP')
+ if portip:
+ self.portal_ip = portip
+
+ results = []
+ if self.prepared_registry['deployment_update']:
+ results.append(self._replace(self.prepared_registry['deployment_file']))
+ if self.prepared_registry['service_update']:
+ results.append(self._replace(self.prepared_registry['service_file']))
+
+ # Clean up returned results
+ rval = 0
+ for result in results:
+ if result['returncode'] != 0:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ def add_modifications(self, deploymentconfig):
+ ''' update a deployment config with changes '''
+ # The environment variable for REGISTRY_HTTP_SECRET is autogenerated
+ # We should set the generated deploymentconfig to the in memory version
+ # the following modifications will overwrite if needed
+ if self.deploymentconfig:
+ result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
+ if result:
+ deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
+
+ # Currently we know that our deployment of a registry requires a few extra modifications
+ # Modification 1
+ # we need specific environment variables to be set
+ for key, value in self.config.config_options['env_vars'].get('value', {}).items():
+ if not deploymentconfig.exists_env_key(key):
+ deploymentconfig.add_env_value(key, value)
+ else:
+ deploymentconfig.update_env_var(key, value)
+
+ # Modification 2
+ # we need specific volume variables to be set
+ for volume in self.volumes:
+ deploymentconfig.update_volume(volume)
+
+ for vol_mount in self.volume_mounts:
+ deploymentconfig.update_volume_mount(vol_mount)
+
+ # Modification 3
+ # Edits
+ edit_results = []
+ for edit in self.config.config_options['edits'].get('value', []):
+ if edit['action'] == 'put':
+ edit_results.append(deploymentconfig.put(edit['key'],
+ edit['value']))
+ if edit['action'] == 'update':
+ edit_results.append(deploymentconfig.update(edit['key'],
+ edit['value'],
+ edit.get('index', None),
+ edit.get('curr_value', None)))
+ if edit['action'] == 'append':
+ edit_results.append(deploymentconfig.append(edit['key'],
+ edit['value']))
+
+ if edit_results and not any([res[0] for res in edit_results]):
+ return None
+
+ return deploymentconfig.yaml_dict
+
+ def needs_update(self):
+ ''' check to see if we need to update '''
+ exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
+ if self.service is None or \
+ not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
+ self.service.yaml_dict,
+ exclude_list,
+ debug=self.verbose):
+ self.prepared_registry['service_update'] = True
+
+ exclude_list = ['dnsPolicy',
+ 'terminationGracePeriodSeconds',
+ 'restartPolicy', 'timeoutSeconds',
+ 'livenessProbe', 'readinessProbe',
+ 'terminationMessagePath',
+ 'securityContext',
+ 'imagePullPolicy',
+ 'protocol', # ports.protocol: TCP
+ 'type', # strategy: {'type': 'rolling'}
+ 'defaultMode', # added on secrets
+ 'activeDeadlineSeconds', # added in 1.5 for timeouts
+ ]
+
+ if self.deploymentconfig is None or \
+ not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
+ self.deploymentconfig.yaml_dict,
+ exclude_list,
+ debug=self.verbose):
+ self.prepared_registry['deployment_update'] = True
+
+ return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update']
+
+ # In the future, we would like to break out each ansible state into a function.
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run idempotent ansible code'''
+
+ rconfig = RegistryConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ {'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'mount_host': {'value': params['mount_host'], 'include': True},
+ 'env_vars': {'value': params['env_vars'], 'include': False},
+ 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
+ 'edits': {'value': params['edits'], 'include': False},
+ 'enforce_quota': {'value': params['enforce_quota'], 'include': True},
+ 'daemonset': {'value': params['daemonset'], 'include': True},
+ 'tls_key': {'value': params['tls_key'], 'include': True},
+ 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
+ })
+
+
+ ocregistry = Registry(rconfig, params['debug'])
+
+ api_rval = ocregistry.get()
+
+ state = params['state']
+ ########
+ # get
+ ########
+ if state == 'list':
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not ocregistry.exists():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ # pylint objects to api_rval being reassigned to a different type
+ # pylint: disable=redefined-variable-type
+ api_rval = ocregistry.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not ocregistry.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ api_rval = ocregistry.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if not params['force'] and not ocregistry.needs_update():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocregistry.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'failed': True, 'msg': 'Unknown state passed. %s' % state}
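
needs_update() leans on Utils.check_def_equal to decide whether the dry-run
objects differ from what is live while ignoring server-populated keys. A
simplified recursive sketch of that idea, assuming only dict/list/scalar
values (the real helper handles more cases than this):

    def defs_equal(user_def, result_def, skip_keys=None):
        '''recursively compare two object definitions, ignoring skip_keys.'''
        skip_keys = skip_keys or []
        if isinstance(user_def, dict) and isinstance(result_def, dict):
            keys = (set(user_def) | set(result_def)) - set(skip_keys)
            return all(defs_equal(user_def.get(key), result_def.get(key), skip_keys)
                       for key in keys)
        if isinstance(user_def, list) and isinstance(result_def, list):
            return len(user_def) == len(result_def) and \
                   all(defs_equal(a, b, skip_keys) for a, b in zip(user_def, result_def))
        return user_def == result_def

    live = {'spec': {'clusterIP': '172.30.0.5', 'ports': [{'port': 5000}]}}
    dry = {'spec': {'ports': [{'port': 5000}]}}
    print(defs_equal(dry, live, skip_keys=['clusterIP']))  # True
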
diff --git a/roles/lib_openshift/src/class/oc_adm_router.py b/roles/lib_openshift/src/class/oc_adm_router.py
new file mode 100644
index 000000000..7b163b120
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_adm_router.py
@@ -0,0 +1,500 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+class RouterException(Exception):
+ ''' Router exception'''
+ pass
+
+
+class RouterConfig(OpenShiftCLIConfig):
+ ''' RouterConfig is a DTO for the router. '''
+ def __init__(self, rname, namespace, kubeconfig, router_options):
+ super(RouterConfig, self).__init__(rname, namespace, kubeconfig, router_options)
+
+
+class Router(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ def __init__(self,
+ router_config,
+ verbose=False):
+ ''' Constructor for Router
+
+ a router consists of the following parts:
+ - dc/router
+ - svc/router
+ - sa/router
+ - secret/router-certs
+ - clusterrolebinding/router-router-role
+ '''
+ super(Router, self).__init__('default', router_config.kubeconfig, verbose)
+ self.config = router_config
+ self.verbose = verbose
+ self.router_parts = [{'kind': 'dc', 'name': self.config.name},
+ {'kind': 'svc', 'name': self.config.name},
+ {'kind': 'sa', 'name': self.config.config_options['service_account']['value']},
+ {'kind': 'secret', 'name': self.config.name + '-certs'},
+ {'kind': 'clusterrolebinding', 'name': 'router-' + self.config.name + '-role'},
+ ]
+
+ self.__prepared_router = None
+ self.dconfig = None
+ self.svc = None
+ self._secret = None
+ self._serviceaccount = None
+ self._rolebinding = None
+
+ @property
+ def prepared_router(self):
+ ''' property for the prepared router'''
+ if self.__prepared_router is None:
+ results = self._prepare_router()
+ if not results or 'returncode' in results and results['returncode'] != 0:
+ if 'stderr' in results:
+ raise RouterException('Could not perform router preparation: %s' % results['stderr'])
+
+ raise RouterException('Could not perform router preparation.')
+ self.__prepared_router = results
+
+ return self.__prepared_router
+
+ @prepared_router.setter
+ def prepared_router(self, obj):
+ '''setter for the prepared_router'''
+ self.__prepared_router = obj
+
+ @property
+ def deploymentconfig(self):
+ ''' property deploymentconfig'''
+ return self.dconfig
+
+ @deploymentconfig.setter
+ def deploymentconfig(self, config):
+ ''' setter for property deploymentconfig '''
+ self.dconfig = config
+
+ @property
+ def service(self):
+ ''' property for service '''
+ return self.svc
+
+ @service.setter
+ def service(self, config):
+ ''' setter for property service '''
+ self.svc = config
+
+ @property
+ def secret(self):
+ ''' property secret '''
+ return self._secret
+
+ @secret.setter
+ def secret(self, config):
+ ''' setter for property secret '''
+ self._secret = config
+
+ @property
+ def serviceaccount(self):
+ ''' property for serviceaccount '''
+ return self._serviceaccount
+
+ @serviceaccount.setter
+ def serviceaccount(self, config):
+ ''' setter for property serviceaccount '''
+ self._serviceaccount = config
+
+ @property
+ def rolebinding(self):
+ ''' property rolebinding '''
+ return self._rolebinding
+
+ @rolebinding.setter
+ def rolebinding(self, config):
+ ''' setter for property rolebinding '''
+ self._rolebinding = config
+
+ def get_object_by_kind(self, kind):
+ '''return the current object for the given kind'''
+ if re.match("^(dc|deploymentconfig)$", kind, flags=re.IGNORECASE):
+ return self.deploymentconfig
+ elif re.match("^(svc|service)$", kind, flags=re.IGNORECASE):
+ return self.service
+ elif re.match("^(sa|serviceaccount)$", kind, flags=re.IGNORECASE):
+ return self.serviceaccount
+ elif re.match("secret", kind, flags=re.IGNORECASE):
+ return self.secret
+ elif re.match("clusterrolebinding", kind, flags=re.IGNORECASE):
+ return self.rolebinding
+
+ return None
+
+ def get(self):
+ ''' return the self.router_parts '''
+ self.service = None
+ self.deploymentconfig = None
+ self.serviceaccount = None
+ self.secret = None
+ self.rolebinding = None
+ for part in self.router_parts:
+ result = self._get(part['kind'], rname=part['name'])
+ if result['returncode'] == 0 and part['kind'] == 'dc':
+ self.deploymentconfig = DeploymentConfig(result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'svc':
+ self.service = Service(content=result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'sa':
+ self.serviceaccount = ServiceAccount(content=result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'secret':
+ self.secret = Secret(content=result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'clusterrolebinding':
+ self.rolebinding = RoleBinding(content=result['results'][0])
+
+ return {'deploymentconfig': self.deploymentconfig,
+ 'service': self.service,
+ 'serviceaccount': self.serviceaccount,
+ 'secret': self.secret,
+ 'clusterrolebinding': self.rolebinding,
+ }
+
+ def exists(self):
+ '''return whether the deploymentconfig, service, secret and serviceaccount all exist '''
+ if self.deploymentconfig and self.service and self.secret and self.serviceaccount:
+ return True
+
+ return False
+
+ def delete(self):
+ '''delete all of the router parts '''
+ parts = []
+ for part in self.router_parts:
+ parts.append(self._delete(part['kind'], part['name']))
+
+ rval = 0
+ for part in parts:
+ if part['returncode'] != 0 and 'already exist' not in part['stderr']:
+ rval = part['returncode']
+
+ return {'returncode': rval, 'results': parts}
+
+ def add_modifications(self, deploymentconfig):
+ '''modify the deployment config'''
+ # We want modifications in the form of edits coming in from the module.
+ # Let's apply these here
+ edit_results = []
+ for edit in self.config.config_options['edits'].get('value', []):
+ if edit['action'] == 'put':
+ edit_results.append(deploymentconfig.put(edit['key'],
+ edit['value']))
+ if edit['action'] == 'update':
+ edit_results.append(deploymentconfig.update(edit['key'],
+ edit['value'],
+ edit.get('index', None),
+ edit.get('curr_value', None)))
+ if edit['action'] == 'append':
+ edit_results.append(deploymentconfig.append(edit['key'],
+ edit['value']))
+
+ if edit_results and not any([res[0] for res in edit_results]):
+ return None
+
+ return deploymentconfig
+
+ # pylint: disable=too-many-branches
+ def _prepare_router(self):
+ '''prepare router for instantiation'''
+ # if cacert, key, and cert were passed, combine them into a pem file
+ if (self.config.config_options['cacert_file']['value'] and
+ self.config.config_options['cert_file']['value'] and
+ self.config.config_options['key_file']['value']):
+
+ router_pem = '/tmp/router.pem'
+ with open(router_pem, 'w') as rfd:
+ rfd.write(open(self.config.config_options['cert_file']['value']).read())
+ rfd.write(open(self.config.config_options['key_file']['value']).read())
+ if self.config.config_options['cacert_file']['value'] and \
+ os.path.exists(self.config.config_options['cacert_file']['value']):
+ rfd.write(open(self.config.config_options['cacert_file']['value']).read())
+
+ atexit.register(Utils.cleanup, [router_pem])
+
+ self.config.config_options['default_cert']['value'] = router_pem
+
+ elif self.config.config_options['default_cert']['value'] is None:
+ # No certificate was passed to us; do not pass one to oc adm router
+ self.config.config_options['default_cert']['include'] = False
+
+ options = self.config.to_option_list()
+
+ cmd = ['router', self.config.name, '-n', self.config.namespace]
+ cmd.extend(options)
+ cmd.extend(['--dry-run=True', '-o', 'json'])
+
+ results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
+
+ # pylint: disable=maybe-no-member
+ if results['returncode'] != 0 or 'items' not in results['results']:
+ return results
+
+ oc_objects = {'DeploymentConfig': {'obj': None, 'path': None, 'update': False},
+ 'Secret': {'obj': None, 'path': None, 'update': False},
+ 'ServiceAccount': {'obj': None, 'path': None, 'update': False},
+ 'ClusterRoleBinding': {'obj': None, 'path': None, 'update': False},
+ 'Service': {'obj': None, 'path': None, 'update': False},
+ }
+ # pylint: disable=invalid-sequence-index
+ for res in results['results']['items']:
+ if res['kind'] == 'DeploymentConfig':
+ oc_objects['DeploymentConfig']['obj'] = DeploymentConfig(res)
+ elif res['kind'] == 'Service':
+ oc_objects['Service']['obj'] = Service(res)
+ elif res['kind'] == 'ServiceAccount':
+ oc_objects['ServiceAccount']['obj'] = ServiceAccount(res)
+ elif res['kind'] == 'Secret':
+ oc_objects['Secret']['obj'] = Secret(res)
+ elif res['kind'] == 'ClusterRoleBinding':
+ oc_objects['ClusterRoleBinding']['obj'] = RoleBinding(res)
+
+ # Currently only deploymentconfig needs updating
+ # Verify we got a deploymentconfig
+ if not oc_objects['DeploymentConfig']['obj']:
+ return results
+
+ # add modifications added
+ oc_objects['DeploymentConfig']['obj'] = self.add_modifications(oc_objects['DeploymentConfig']['obj'])
+
+ for oc_type, oc_data in oc_objects.items():
+ if oc_data['obj'] is not None:
+ oc_data['path'] = Utils.create_tmp_file_from_contents(oc_type, oc_data['obj'].yaml_dict)
+
+ return oc_objects
+
+ def create(self):
+ '''Create a router
+
+ This includes the different parts:
+ - deploymentconfig
+ - service
+ - serviceaccount
+ - secrets
+ - clusterrolebinding
+ '''
+ results = []
+ self.needs_update()
+
+ import time
+ # sleep briefly between each object creation
+ # pylint: disable=maybe-no-member
+ for kind, oc_data in self.prepared_router.items():
+ if oc_data['obj'] is not None:
+ time.sleep(1)
+ if self.get_object_by_kind(kind) is None:
+ results.append(self._create(oc_data['path']))
+
+ elif oc_data['update']:
+ results.append(self._replace(oc_data['path']))
+
+
+ rval = 0
+ for result in results:
+ if result['returncode'] != 0 and 'already exist' not in result['stderr']:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ def update(self):
+ '''run update for the router. This performs a replace'''
+ results = []
+
+ # pylint: disable=maybe-no-member
+ for _, oc_data in self.prepared_router.items():
+ if oc_data['update']:
+ results.append(self._replace(oc_data['path']))
+
+ rval = 0
+ for result in results:
+ if result['returncode'] != 0:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ def needs_update(self):
+ ''' check to see if we need to update '''
+ # ServiceAccount:
+ # secrets and imagePullSecrets are auto generated on the
+ # cluster, so skip them when comparing
+ skip = ['secrets', 'imagePullSecrets']
+ if self.serviceaccount is None or \
+ not Utils.check_def_equal(self.prepared_router['ServiceAccount']['obj'].yaml_dict,
+ self.serviceaccount.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['ServiceAccount']['update'] = True
+
+ # Secret:
+ # See if one was generated from our dry-run and verify it if needed
+ if self.prepared_router['Secret']['obj']:
+ if not self.secret:
+ self.prepared_router['Secret']['update'] = True
+
+ if self.secret is None or \
+ not Utils.check_def_equal(self.prepared_router['Secret']['obj'].yaml_dict,
+ self.secret.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['Secret']['update'] = True
+
+ # Service:
+ # Fix the ports to have protocol=TCP
+ for port in self.prepared_router['Service']['obj'].get('spec.ports'):
+ port['protocol'] = 'TCP'
+
+ skip = ['portalIP', 'clusterIP', 'sessionAffinity', 'type']
+ if self.service is None or \
+ not Utils.check_def_equal(self.prepared_router['Service']['obj'].yaml_dict,
+ self.service.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['Service']['update'] = True
+
+ # DeploymentConfig:
+ # Router needs some exceptions.
+ # We do not want to check the autogenerated password for stats admin
+ if self.deploymentconfig is not None:
+ if not self.config.config_options['stats_password']['value']:
+ for idx, env_var in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
+ 'spec.template.spec.containers[0].env') or []):
+ if env_var['name'] == 'STATS_PASSWORD':
+ env_var['value'] = \
+ self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx)
+ break
+
+ # dry-run doesn't add the protocol to the ports section. We will manually do that.
+ for idx, port in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
+ 'spec.template.spec.containers[0].ports') or []):
+ if 'protocol' not in port:
+ port['protocol'] = 'TCP'
+
+ # These are different when generating
+ skip = ['dnsPolicy',
+ 'terminationGracePeriodSeconds',
+ 'restartPolicy', 'timeoutSeconds',
+ 'livenessProbe', 'readinessProbe',
+ 'terminationMessagePath', 'hostPort',
+ 'defaultMode',
+ ]
+
+ if self.deploymentconfig is None or \
+ not Utils.check_def_equal(self.prepared_router['DeploymentConfig']['obj'].yaml_dict,
+ self.deploymentconfig.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['DeploymentConfig']['update'] = True
+
+ # Check if any of the parts need updating, if so, return True
+ # else, no need to update
+ # pylint: disable=no-member
+ return any([self.prepared_router[oc_type]['update'] for oc_type in self.prepared_router.keys()])
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run ansible idempotent code'''
+
+ rconfig = RouterConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ {'default_cert': {'value': params['default_cert'], 'include': True},
+ 'cert_file': {'value': params['cert_file'], 'include': False},
+ 'key_file': {'value': params['key_file'], 'include': False},
+ 'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'router_type': {'value': params['router_type'], 'include': False},
+ 'host_network': {'value': params['host_network'], 'include': True},
+ 'external_host': {'value': params['external_host'], 'include': True},
+ 'external_host_vserver': {'value': params['external_host_vserver'],
+ 'include': True},
+ 'external_host_insecure': {'value': params['external_host_insecure'],
+ 'include': True},
+ 'external_host_partition_path': {'value': params['external_host_partition_path'],
+ 'include': True},
+ 'external_host_username': {'value': params['external_host_username'],
+ 'include': True},
+ 'external_host_password': {'value': params['external_host_password'],
+ 'include': True},
+ 'external_host_private_key': {'value': params['external_host_private_key'],
+ 'include': True},
+ 'expose_metrics': {'value': params['expose_metrics'], 'include': True},
+ 'metrics_image': {'value': params['metrics_image'], 'include': True},
+ 'stats_user': {'value': params['stats_user'], 'include': True},
+ 'stats_password': {'value': params['stats_password'], 'include': True},
+ 'stats_port': {'value': params['stats_port'], 'include': True},
+ # extra
+ 'cacert_file': {'value': params['cacert_file'], 'include': False},
+ # edits
+ 'edits': {'value': params['edits'], 'include': False},
+ })
+
+
+ state = params['state']
+
+ ocrouter = Router(rconfig, verbose=params['debug'])
+
+ api_rval = ocrouter.get()
+
+ ########
+ # get
+ ########
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not ocrouter.exists():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ # In case of delete we return a list of each object
+ # that represents a router and its result in a list
+ # pylint: disable=redefined-variable-type
+ api_rval = ocrouter.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not ocrouter.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ api_rval = ocrouter.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if not ocrouter.needs_update():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocrouter.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 21129a50c..51d3ce996 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -77,7 +77,6 @@ class OCObject(OpenShiftCLI):
if objects['returncode'] != 0:
return objects
- # pylint: disable=no-member
data = None
if files:
data = Utils.get_resource_file(files[0], content_type)
diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py
index cb743e19d..3935525f1 100644
--- a/roles/lib_openshift/src/class/oc_route.py
+++ b/roles/lib_openshift/src/class/oc_route.py
@@ -55,13 +55,15 @@ class OCRoute(OpenShiftCLI):
def update(self):
'''update the object'''
- # need to update the tls information and the service name
- return self._replace_content(self.kind, self.config.name, self.config.data)
+ return self._replace_content(self.kind,
+ self.config.name,
+ self.config.data,
+ force=(self.config.host != self.route.get_host()))
def needs_update(self):
''' verify an update is needed '''
skip = []
- return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=True)
+ return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=self.verbose)
@staticmethod
def get_cert_data(path, content):
diff --git a/roles/lib_openshift/src/class/oc_sdnvalidator.py b/roles/lib_openshift/src/class/oc_sdnvalidator.py
new file mode 100644
index 000000000..da923337b
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_sdnvalidator.py
@@ -0,0 +1,58 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class OCSDNValidator(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ def __init__(self, kubeconfig):
+ ''' Constructor for OCSDNValidator '''
+ # namespace has no meaning for SDN validation, hardcode to 'default'
+ super(OCSDNValidator, self).__init__('default', kubeconfig)
+
+ def get(self, kind, invalid_filter):
+ ''' return SDN information '''
+
+ rval = self._get(kind)
+ if rval['returncode'] != 0:
+ return False, rval, []
+
+ # materialize the filter so the emptiness check works on python 3
+ return True, rval, list(filter(invalid_filter, rval['results'][0]['items']))
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params):
+ ''' run the idempotent ansible code
+
+ params comes from the ansible portion of this module
+ '''
+
+ sdnvalidator = OCSDNValidator(params['kubeconfig'])
+ all_invalid = {}
+ failed = False
+
+ checks = (
+ (
+ 'hostsubnet',
+ lambda x: x['metadata']['name'] != x['host'],
+ u'hostsubnets where metadata.name != host',
+ ),
+ (
+ 'netnamespace',
+ lambda x: x['metadata']['name'] != x['netname'],
+ u'netnamespaces where metadata.name != netname',
+ ),
+ )
+
+ for resource, invalid_filter, invalid_msg in checks:
+ success, rval, invalid = sdnvalidator.get(resource, invalid_filter)
+ if not success:
+ return {'failed': True, 'msg': 'Failed to GET {}.'.format(resource), 'state': 'list', 'results': rval}
+ if invalid:
+ failed = True
+ all_invalid[invalid_msg] = invalid
+
+ if failed:
+ return {'failed': True, 'msg': 'Some SDN objects are invalid.', 'state': 'list', 'results': all_invalid}
+
+ return {'msg': 'All SDN objects are valid.'}
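
The two checks above are plain predicates over the returned objects. Run
against fabricated hostsubnet records, the filter isolates exactly the records
where metadata.name and host disagree:

    def invalid_filter(subnet):
        return subnet['metadata']['name'] != subnet['host']

    hostsubnets = [
        {'metadata': {'name': 'node1.example.com'}, 'host': 'node1.example.com'},
        {'metadata': {'name': 'node2.example.com'}, 'host': 'node2-renamed.example.com'},
    ]

    invalid = list(filter(invalid_filter, hostsubnets))
    print(invalid)  # only the mismatched node2 record survives the filter
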
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index 5eac27572..deb36a9fa 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -29,7 +29,7 @@ class OCSecret(OpenShiftCLI):
if results['returncode'] == 0 and results['results'][0]:
results['exists'] = True
if self.decode:
- if results['results'][0].has_key('data'):
+ if 'data' in results['results'][0]:
for sname, value in results['results'][0]['data'].items():
results['decoded'][sname] = base64.b64decode(value)
diff --git a/roles/lib_openshift/src/class/oc_service.py b/roles/lib_openshift/src/class/oc_service.py
index d4cc83a59..20cf23df5 100644
--- a/roles/lib_openshift/src/class/oc_service.py
+++ b/roles/lib_openshift/src/class/oc_service.py
@@ -22,7 +22,7 @@ class OCService(OpenShiftCLI):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCVolume '''
- super(OCService, self).__init__(namespace, kubeconfig)
+ super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
cluster_ip, portal_ip, session_affinity, service_type)
@@ -93,7 +93,9 @@ class OCService(OpenShiftCLI):
params['portalip'],
params['ports'],
params['session_affinity'],
- params['service_type'])
+ params['service_type'],
+ params['kubeconfig'],
+ params['debug'])
state = params['state']
diff --git a/roles/lib_openshift/src/doc/atomic_container b/roles/lib_openshift/src/doc/atomic_container
new file mode 100644
index 000000000..53fc40f36
--- /dev/null
+++ b/roles/lib_openshift/src/doc/atomic_container
@@ -0,0 +1,36 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_atomic_container
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform
+ - Allows executing commands on the container images
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ name:
+ description:
+ - Name of the container
+ required: True
+ default: null
+ image:
+ description:
+ - The image to use to install the container
+ required: True
+ default: null
+ state:
+ description:
+ - State of the container
+ required: False
+ choices: ["present", "absent", "latest", "rollback"]
+ default: "latest"
+ values:
+ description:
+ - Values for the installation of the container
+ required: False
+ default: None
+'''
diff --git a/roles/lib_openshift/src/doc/ca_server_cert b/roles/lib_openshift/src/doc/ca_server_cert
new file mode 100644
index 000000000..ff9229281
--- /dev/null
+++ b/roles/lib_openshift/src/doc/ca_server_cert
@@ -0,0 +1,96 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_adm_ca_server_cert
+short_description: Module to run openshift oc adm ca create-server-cert
+description:
+ - Wrapper around the openshift `oc adm ca create-server-cert` command.
+options:
+ state:
+ description:
+ - Present is the only supported state. The state present means that `oc adm ca` will generate a certificate
+ - and verify that the hostnames and the ClusterIP exist in the certificate.
+ - When create-server-cert is run, the following parameters are passed:
+ - ['cert', 'key', 'signer_cert', 'signer_key', 'signer_serial']
+ required: false
+ default: present
+ choices:
+ - present
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ cert:
+ description:
+ - The certificate file. Choose a name that indicates what the service is.
+ required: false
+ default: None
+ aliases: []
+ key:
+ description:
+ - The key file. Choose a name that indicates what the service is.
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Force updating of the existing cert and key files
+ required: false
+ default: False
+ aliases: []
+ signer_cert:
+ description:
+ - The signer certificate file.
+ required: false
+ default: /etc/origin/master/ca.crt
+ aliases: []
+ signer_key:
+ description:
+ - The signer key file.
+ required: false
+ default: /etc/origin/master/ca.key
+ aliases: []
+ signer_serial:
+ description:
+ - The signer serial file.
+ required: false
+ default: /etc/origin/master/ca.serial.txt
+ aliases: []
+ hostnames:
+ description:
+ - Every hostname or IP that server certs should be valid for
+ required: false
+ default: []
+ aliases: []
+ backup:
+ description:
+ - Whether to backup the cert and key files before writing them.
+ required: false
+ default: True
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Create a self-signed cert
+ oc_adm_ca_server_cert:
+ signer_cert: /etc/origin/master/ca.crt
+ signer_key: /etc/origin/master/ca.key
+ signer_serial: /etc/origin/master/ca.serial.txt
+ hostnames: "registry.test.openshift.com,127.0.0.1,docker-registry.default.svc.cluster.local"
+ cert: /etc/origin/master/registry.crt
+ key: /etc/origin/master/registry.key
+'''
diff --git a/roles/lib_openshift/src/doc/registry b/roles/lib_openshift/src/doc/registry
new file mode 100644
index 000000000..ebc714b7a
--- /dev/null
+++ b/roles/lib_openshift/src/doc/registry
@@ -0,0 +1,192 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_adm_registry
+short_description: Module to manage openshift registry
+description:
+ - Manage openshift registry programmatically.
+options:
+ state:
+ description:
+ - The desired action when managing openshift registry
+ - present - update or create the registry
+ - absent - tear down the registry service and deploymentconfig
+ - list - returns the current representation of a registry
+ required: false
+ default: present
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - The name of the registry
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace in which the registry is managed
+ required: false
+ default: default
+ aliases: []
+ images:
+ description:
+ - The image to base this registry on - ${component} will be replaced with --type
+ required: false
+ default: 'openshift3/ose-${component}:${version}'
+ aliases: []
+ latest_images:
+ description:
+ - If true, attempt to use the latest image for the registry instead of the latest release.
+ required: false
+ default: False
+ aliases: []
+ labels:
+ description:
+ - A set of labels to uniquely identify the registry and its components.
+ required: false
+ default: None
+ aliases: []
+ enforce_quota:
+ description:
+ - If set, the registry will refuse to write blobs if they exceed quota limits
+ required: False
+ default: False
+ aliases: []
+ mount_host:
+ description:
+ - If set, the registry volume will be created as a host-mount at this path.
+ required: False
+ default: None
+ aliases: []
+ ports:
+ description:
+ - A comma delimited list of ports or port pairs to expose on the registry pod. The default is 5000.
+ required: False
+ default: [5000]
+ aliases: []
+ replicas:
+ description:
+ - The replication factor of the registry; commonly 2 when high availability is desired.
+ required: False
+ default: 1
+ aliases: []
+ selector:
+ description:
+ - Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes.
+ required: False
+ default: None
+ aliases: []
+ service_account:
+ description:
+ - Name of the service account to use to run the registry pod.
+ required: False
+ default: 'registry'
+ aliases: []
+ tls_certificate:
+ description:
+ - An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS
+ required: false
+ default: None
+ aliases: []
+ tls_key:
+ description:
+ - An optional path to a PEM encoded private key for serving over TLS
+ required: false
+ default: None
+ aliases: []
+ volume_mounts:
+ description:
+ - The volume mounts for the registry.
+ required: false
+ default: None
+ aliases: []
+ daemonset:
+ description:
+ - Use a daemonset instead of a deployment config.
+ required: false
+ default: False
+ aliases: []
+ edits:
+ description:
+ - A list of modifications to make on the deploymentconfig
+ required: false
+ default: None
+ aliases: []
+ env_vars:
+ description:
+ - A dictionary of modifications to make on the deploymentconfig. e.g. FOO: BAR
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Force a registry update.
+ required: false
+ default: False
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create a secure registry
+ oc_adm_registry:
+ name: docker-registry
+ service_account: registry
+ replicas: 2
+ namespace: default
+ selector: type=infra
+ images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}"
+ env_vars:
+ REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml
+ REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
+ REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
+ REGISTRY_HTTP_SECRET: supersecret
+ volume_mounts:
+ - path: /etc/secrets
+ name: dockercerts
+ type: secret
+ secret_name: registry-secret
+ - path: /etc/registryconfig
+ name: dockersecrets
+ type: secret
+ secret_name: docker-registry-config
+ edits:
+ - key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
+ value: HTTPS
+ action: put
+ - key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
+ value: HTTPS
+ action: put
+ - key: spec.strategy.rollingParams
+ value:
+ intervalSeconds: 1
+ maxSurge: 50%
+ maxUnavailable: 50%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ action: put
+ - key: spec.template.spec.containers[0].resources.limits.memory
+ value: 2G
+ action: update
+ - key: spec.template.spec.containers[0].resources.requests.memory
+ value: 1G
+ action: update
+
+ register: registryout
+
+'''
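
The edits in the example above address nested fields with dotted keys such as
spec.template.spec.containers[0].livenessProbe.httpGet.scheme. A toy put
implementation for that addressing scheme, assuming plain dict/list nesting
(the repository's Yedit helper is considerably more general):

    import re

    def put(obj, dotted_key, value):
        '''walk a dotted path, honoring [n] list indexes, and set the leaf.'''
        steps = re.findall(r'([^.\[\]]+)(?:\[(\d+)\])?', dotted_key)
        node = obj
        for name, index in steps[:-1]:
            node = node[name]
            if index:
                node = node[int(index)]
        name, index = steps[-1]
        if index:
            node[name][int(index)] = value
        else:
            node[name] = value

    dc = {'spec': {'template': {'spec': {'containers': [
          {'livenessProbe': {'httpGet': {'scheme': 'HTTP'}}}]}}}}
    put(dc, 'spec.template.spec.containers[0].livenessProbe.httpGet.scheme', 'HTTPS')
    print(dc['spec']['template']['spec']['containers'][0]['livenessProbe']['httpGet'])
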
diff --git a/roles/lib_openshift/src/doc/router b/roles/lib_openshift/src/doc/router
new file mode 100644
index 000000000..ed46e03a0
--- /dev/null
+++ b/roles/lib_openshift/src/doc/router
@@ -0,0 +1,217 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_adm_router
+short_description: Module to manage openshift router
+description:
+ - Manage openshift router programmatically.
+options:
+ state:
+ description:
+ - Whether to create or delete the router
+ - present - create the router
+ - absent - remove the router
+ - list - return the current representation of a router
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - The name of the router
+ required: false
+ default: router
+ aliases: []
+ namespace:
+ description:
+ - The namespace in which to manage the router.
+ required: false
+ default: default
+ aliases: []
+ images:
+ description:
+ - The image to base this router on - ${component} will be replaced with --type
+ required: false
+ default: 'openshift3/ose-${component}:${version}'
+ aliases: []
+ latest_images:
+ description:
+    - If true, attempt to use the latest image for the router instead of the latest release.
+ required: false
+ default: False
+ aliases: []
+ labels:
+ description:
+    - A set of labels to uniquely identify the router and its components.
+ required: false
+ default: None
+ aliases: []
+ ports:
+ description:
+ - A list of strings in the 'port:port' format
+    required: false
+ default:
+ - 80:80
+ - 443:443
+ aliases: []
+ replicas:
+ description:
+    - The replication factor of the router; commonly 2 when high availability is desired.
+    required: false
+ default: 1
+ aliases: []
+ selector:
+ description:
+ - Selector used to filter nodes on deployment. Used to run routers on a specific set of nodes.
+    required: false
+ default: None
+ aliases: []
+ service_account:
+ description:
+ - Name of the service account to use to run the router pod.
+    required: false
+ default: router
+ aliases: []
+ router_type:
+ description:
+    - The type of router to deploy - if you specify --images this flag may be ignored.
+ required: false
+ default: haproxy-router
+ aliases: []
+ external_host:
+ description:
+ - If the underlying router implementation connects with an external host, this is the external host's hostname.
+ required: false
+ default: None
+ aliases: []
+ external_host_vserver:
+ description:
+ - If the underlying router implementation uses virtual servers, this is the name of the virtual server for HTTP connections.
+ required: false
+ default: None
+ aliases: []
+ external_host_insecure:
+ description:
+ - If the underlying router implementation connects with an external host
+ - over a secure connection, this causes the router to skip strict certificate verification with the external host.
+ required: false
+ default: False
+ aliases: []
+ external_host_partition_path:
+ description:
+ - If the underlying router implementation uses partitions for control boundaries, this is the path to use for that partition.
+ required: false
+ default: None
+ aliases: []
+ external_host_username:
+ description:
+ - If the underlying router implementation connects with an external host, this is the username for authenticating with the external host.
+ required: false
+ default: None
+ aliases: []
+ external_host_password:
+ description:
+ - If the underlying router implementation connects with an external host, this is the password for authenticating with the external host.
+ required: false
+ default: None
+ aliases: []
+ external_host_private_key:
+ description:
+ - If the underlying router implementation requires an SSH private key, this is the path to the private key file.
+ required: false
+ default: None
+ aliases: []
+ expose_metrics:
+ description:
+    - A hint to run an extra container in the pod to expose metrics - the image
+    - is either chosen based on the router implementation or provided with --metrics-image.
+ required: false
+ default: False
+ aliases: []
+ metrics_image:
+ description:
+    - If expose_metrics is true, this is the image used to run a sidecar container
+    - in the pod that exposes metrics. If not set and expose_metrics is true, the
+    - image will depend on the router implementation.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+notes:
+- There are some exceptions to note for the idempotency checks in this module.
+- The strategy is to use the oc adm router command to generate a default
+- configuration when creating or updating a router. Often there are
+- differences between the generated template and what is in memory in openshift.
+- We make exceptions to not check these specific values when comparing objects.
+- Here is the list of exceptions:
+- - DeploymentConfig:
+ - dnsPolicy
+ - terminationGracePeriodSeconds
+ - restartPolicy
+ - timeoutSeconds
+ - livenessProbe
+ - readinessProbe
+ - terminationMessagePath
+ - hostPort
+ - defaultMode
+ - Service:
+ - portalIP
+ - clusterIP
+ - sessionAffinity
+ - type
+ - ServiceAccount:
+ - secrets
+ - imagePullSecrets
+'''
+
+EXAMPLES = '''
+- name: create routers
+ oc_adm_router:
+ name: router
+ service_account: router
+ replicas: 2
+ namespace: default
+ selector: type=infra
+ cert_file: /etc/origin/master/named_certificates/router.crt
+ key_file: /etc/origin/master/named_certificates/router.key
+ cacert_file: /etc/origin/master/named_certificates/router.ca
+ edits:
+ - key: spec.strategy.rollingParams
+ value:
+ intervalSeconds: 1
+ maxSurge: 50%
+ maxUnavailable: 50%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ action: put
+ - key: spec.template.spec.containers[0].resources.limits.memory
+ value: 2G
+ action: put
+ - key: spec.template.spec.containers[0].resources.requests.memory
+ value: 1G
+ action: put
+ - key: spec.template.spec.containers[0].env
+ value:
+ name: EXTENDED_VALIDATION
+ value: 'false'
+ action: update
+ register: router_out
+ run_once: True
+'''
diff --git a/roles/lib_openshift/src/doc/sdnvalidator b/roles/lib_openshift/src/doc/sdnvalidator
new file mode 100644
index 000000000..0b1862ed1
--- /dev/null
+++ b/roles/lib_openshift/src/doc/sdnvalidator
@@ -0,0 +1,27 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_sdnvalidator
+short_description: Validate SDN objects
+description:
+ - Validate SDN objects
+options:
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+author:
+- "Mo Khan <monis@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: validate SDN objects
+  oc_sdnvalidator:
+  register: oc_sdnvalidator
+'''
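+
+# What the validator checks (as exercised by the unit tests below): every
+# hostsubnet's metadata.name must equal its host, and every netnamespace's
+# metadata.name must equal its netname; mismatching objects are reported
+# as invalid.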
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index a895b40b3..d037074a5 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -9,6 +9,32 @@ class OpenShiftCLIError(Exception):
pass
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
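+# A minimal sketch of the lookup behavior (paths below are illustrative):
+#
+#   os.environ['PATH'] = '/bin:/usr/bin'
+#   locate_oc_binary()  # -> '/usr/local/bin/oc' if the binary exists there
+#                       #    (via ADDITIONAL_PATH_LOOKUPS), else the bare 'oc'
+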
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
@@ -22,6 +48,7 @@ class OpenShiftCLI(object):
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
@@ -218,24 +245,23 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout, stderr
+ return proc.returncode, stdout.decode(), stderr.decode()
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
- cmds = []
+ cmds = [self.oc_binary]
+
if oadm:
- cmds = ['oadm']
- else:
- cmds = ['oc']
+ cmds.append('adm')
+
+ cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- cmds.extend(cmd)
-
rval = {}
results = ''
err = None
@@ -243,7 +269,10 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- returncode, stdout, stderr = self._run(cmds, input_data)
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
@@ -295,7 +324,13 @@ class Utils(object):
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
- Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
@@ -377,7 +412,12 @@ class Utils(object):
contents = sfd.read()
if sfile_type == 'yaml':
- contents = yaml.load(contents, yaml.RoundTripLoader)
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
@@ -483,8 +523,8 @@ class Utils(object):
elif value != user_def[key]:
if debug:
print('value should be identical')
- print(value)
print(user_def[key])
+ print(value)
return False
# recurse on a dictionary
@@ -504,8 +544,8 @@ class Utils(object):
if api_values != user_values:
if debug:
print("keys are not equal in dict")
- print(api_values)
print(user_values)
+ print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
diff --git a/roles/lib_openshift/src/lib/deploymentconfig.py b/roles/lib_openshift/src/lib/deploymentconfig.py
index e060d3707..0549bba84 100644
--- a/roles/lib_openshift/src/lib/deploymentconfig.py
+++ b/roles/lib_openshift/src/lib/deploymentconfig.py
@@ -4,7 +4,7 @@
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
- ''' Class to wrap the oc command line tools '''
+ ''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
@@ -68,7 +68,6 @@ spec:
super(DeploymentConfig, self).__init__(content=content)
- # pylint: disable=no-member
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
@@ -106,6 +105,18 @@ spec:
return False
+ def get_env_var(self, key):
+        '''return a single environment variable by name'''
+ results = self.get(DeploymentConfig.env_path) or []
+ if not results:
+ return None
+
+ for env_var in results:
+ if env_var['name'] == key:
+ return env_var
+
+ return None
+
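+    # Illustrative example (env contents below are hypothetical):
+    #   dc.get_env_var('FOO')  # -> {'name': 'FOO', 'value': 'BAR'}
+    #   dc.get_env_var('BAZ')  # -> None
+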
def get_env_vars(self):
'''return a environment variables '''
return self.get(DeploymentConfig.env_path) or []
diff --git a/roles/lib_openshift/src/lib/import.py b/roles/lib_openshift/src/lib/import.py
index 6344c1a54..a79297898 100644
--- a/roles/lib_openshift/src/lib/import.py
+++ b/roles/lib_openshift/src/lib/import.py
@@ -7,6 +7,7 @@
from __future__ import print_function
import atexit
+import copy
import json
import os
import re
@@ -14,5 +15,9 @@ import shutil
import subprocess
import tempfile
# pylint: disable=import-error
-import ruamel.yaml as yaml
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
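+# Note: after this change the name yaml may refer to either ruamel.yaml or
+# PyYAML; consumers probe with hasattr(yaml, 'RoundTripLoader') /
+# hasattr(yaml, 'RoundTripDumper') before using ruamel-specific APIs.
+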
from ansible.module_utils.basic import AnsibleModule
diff --git a/roles/lib_openshift/src/lib/replicationcontroller.py b/roles/lib_openshift/src/lib/replicationcontroller.py
index ae585a986..8bcc1e3cc 100644
--- a/roles/lib_openshift/src/lib/replicationcontroller.py
+++ b/roles/lib_openshift/src/lib/replicationcontroller.py
@@ -4,7 +4,12 @@
# pylint: disable=too-many-public-methods
class ReplicationController(DeploymentConfig):
- ''' Class to wrap the oc command line tools '''
+ ''' Class to model a replicationcontroller openshift object.
+
+        Currently this is modeled after a deployment config since the two
+        are very similar. In the future, when the need arises, we will
+        add replicationcontroller-specific functionality to this class.
+ '''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
diff --git a/roles/lib_openshift/src/lib/rolebinding.py b/roles/lib_openshift/src/lib/rolebinding.py
new file mode 100644
index 000000000..69629f9f5
--- /dev/null
+++ b/roles/lib_openshift/src/lib/rolebinding.py
@@ -0,0 +1,289 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class RoleBindingConfig(object):
+ ''' Handle rolebinding config '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ name,
+ namespace,
+ kubeconfig,
+ group_names=None,
+ role_ref=None,
+ subjects=None,
+ usernames=None):
+ ''' constructor for handling rolebinding options '''
+ self.kubeconfig = kubeconfig
+ self.name = name
+ self.namespace = namespace
+ self.group_names = group_names
+ self.role_ref = role_ref
+ self.subjects = subjects
+ self.usernames = usernames
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' create a default rolebinding as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'RoleBinding'
+        self.data['groupNames'] = self.group_names
+        self.data['metadata'] = {}
+        self.data['metadata']['name'] = self.name
+        self.data['metadata']['namespace'] = self.namespace
+
+ self.data['roleRef'] = self.role_ref
+ self.data['subjects'] = self.subjects
+ self.data['userNames'] = self.usernames
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class RoleBinding(Yedit):
+ ''' Class to model a rolebinding openshift object'''
+ group_names_path = "groupNames"
+ role_ref_path = "roleRef"
+ subjects_path = "subjects"
+ user_names_path = "userNames"
+
+ kind = 'RoleBinding'
+
+ def __init__(self, content):
+ '''RoleBinding constructor'''
+ super(RoleBinding, self).__init__(content=content)
+ self._subjects = None
+ self._role_ref = None
+ self._group_names = None
+ self._user_names = None
+
+ @property
+ def subjects(self):
+ ''' subjects property '''
+ if self._subjects is None:
+ self._subjects = self.get_subjects()
+ return self._subjects
+
+ @subjects.setter
+ def subjects(self, data):
+ ''' subjects property setter'''
+ self._subjects = data
+
+ @property
+ def role_ref(self):
+ ''' role_ref property '''
+ if self._role_ref is None:
+ self._role_ref = self.get_role_ref()
+ return self._role_ref
+
+ @role_ref.setter
+ def role_ref(self, data):
+ ''' role_ref property setter'''
+ self._role_ref = data
+
+ @property
+ def group_names(self):
+ ''' group_names property '''
+ if self._group_names is None:
+ self._group_names = self.get_group_names()
+ return self._group_names
+
+ @group_names.setter
+ def group_names(self, data):
+ ''' group_names property setter'''
+ self._group_names = data
+
+ @property
+ def user_names(self):
+ ''' user_names property '''
+ if self._user_names is None:
+ self._user_names = self.get_user_names()
+ return self._user_names
+
+ @user_names.setter
+ def user_names(self, data):
+ ''' user_names property setter'''
+ self._user_names = data
+
+ def get_group_names(self):
+ ''' return groupNames '''
+ return self.get(RoleBinding.group_names_path) or []
+
+ def get_user_names(self):
+ ''' return usernames '''
+ return self.get(RoleBinding.user_names_path) or []
+
+ def get_role_ref(self):
+ ''' return role_ref '''
+ return self.get(RoleBinding.role_ref_path) or {}
+
+ def get_subjects(self):
+ ''' return subjects '''
+ return self.get(RoleBinding.subjects_path) or []
+
+ #### ADD #####
+ def add_subject(self, inc_subject):
+ ''' add a subject '''
+ if self.subjects:
+ # pylint: disable=no-member
+ self.subjects.append(inc_subject)
+ else:
+ self.put(RoleBinding.subjects_path, [inc_subject])
+
+ return True
+
+ def add_role_ref(self, inc_role_ref):
+ ''' add a role_ref '''
+ if not self.role_ref:
+ self.put(RoleBinding.role_ref_path, {"name": inc_role_ref})
+ return True
+
+ return False
+
+ def add_group_names(self, inc_group_names):
+ ''' add a group_names '''
+ if self.group_names:
+ # pylint: disable=no-member
+ self.group_names.append(inc_group_names)
+ else:
+ self.put(RoleBinding.group_names_path, [inc_group_names])
+
+ return True
+
+ def add_user_name(self, inc_user_name):
+ ''' add a username '''
+ if self.user_names:
+ # pylint: disable=no-member
+ self.user_names.append(inc_user_name)
+ else:
+ self.put(RoleBinding.user_names_path, [inc_user_name])
+
+ return True
+
+ #### /ADD #####
+
+ #### Remove #####
+ def remove_subject(self, inc_subject):
+ ''' remove a subject '''
+ try:
+ # pylint: disable=no-member
+ self.subjects.remove(inc_subject)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def remove_role_ref(self, inc_role_ref):
+ ''' remove a role_ref '''
+ if self.role_ref and self.role_ref['name'] == inc_role_ref:
+ del self.role_ref['name']
+ return True
+
+ return False
+
+ def remove_group_name(self, inc_group_name):
+ ''' remove a groupname '''
+ try:
+ # pylint: disable=no-member
+ self.group_names.remove(inc_group_name)
+ except ValueError as _:
+ return False
+
+ return True
+
+ def remove_user_name(self, inc_user_name):
+ ''' remove a username '''
+ try:
+ # pylint: disable=no-member
+ self.user_names.remove(inc_user_name)
+ except ValueError as _:
+ return False
+
+ return True
+
+ #### /REMOVE #####
+
+ #### UPDATE #####
+ def update_subject(self, inc_subject):
+ ''' update a subject '''
+ try:
+ # pylint: disable=no-member
+ index = self.subjects.index(inc_subject)
+ except ValueError as _:
+ return self.add_subject(inc_subject)
+
+ self.subjects[index] = inc_subject
+
+ return True
+
+ def update_group_name(self, inc_group_name):
+ ''' update a groupname '''
+ try:
+ # pylint: disable=no-member
+ index = self.group_names.index(inc_group_name)
+ except ValueError as _:
+ return self.add_group_names(inc_group_name)
+
+ self.group_names[index] = inc_group_name
+
+ return True
+
+ def update_user_name(self, inc_user_name):
+ ''' update a username '''
+ try:
+ # pylint: disable=no-member
+ index = self.user_names.index(inc_user_name)
+ except ValueError as _:
+ return self.add_user_name(inc_user_name)
+
+ self.user_names[index] = inc_user_name
+
+ return True
+
+ def update_role_ref(self, inc_role_ref):
+ ''' update a role_ref '''
+ self.role_ref['name'] = inc_role_ref
+
+ return True
+
+ #### /UPDATE #####
+
+ #### FIND ####
+ def find_subject(self, inc_subject):
+ ''' find a subject '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.subjects.index(inc_subject)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_group_name(self, inc_group_name):
+ ''' find a group_name '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.group_names.index(inc_group_name)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_user_name(self, inc_user_name):
+ ''' find a user_name '''
+ index = None
+ try:
+ # pylint: disable=no-member
+ index = self.user_names.index(inc_user_name)
+ except ValueError as _:
+ return index
+
+ return index
+
+ def find_role_ref(self, inc_role_ref):
+        ''' find a role_ref '''
+ if self.role_ref and self.role_ref['name'] == inc_role_ref['name']:
+ return self.role_ref
+
+ return None
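+
+
+# A brief usage sketch (names and values below are illustrative only):
+#
+#   rb = RoleBinding({'apiVersion': 'v1', 'kind': 'RoleBinding',
+#                     'userNames': ['system:serviceaccount:default:router']})
+#   rb.add_role_ref('system:router')  # sets roleRef since none exists yet
+#   rb.find_user_name('system:serviceaccount:default:router')  # -> 0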
diff --git a/roles/lib_openshift/src/lib/secret.py b/roles/lib_openshift/src/lib/secret.py
index 39bf3c33a..75c32e8b1 100644
--- a/roles/lib_openshift/src/lib/secret.py
+++ b/roles/lib_openshift/src/lib/secret.py
@@ -20,7 +20,7 @@ class SecretConfig(object):
self.create_dict()
def create_dict(self):
- ''' return a secret as a dict '''
+ ''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
self.data['metadata'] = {}
@@ -90,8 +90,7 @@ class Secret(Yedit):
def update_secret(self, key, value):
''' update a secret'''
- # pylint: disable=no-member
- if self.secrets.has_key(key):
+ if key in self.secrets:
self.secrets[key] = value
else:
self.add_secret(key, value)
diff --git a/roles/lib_openshift/src/lib/service.py b/roles/lib_openshift/src/lib/service.py
index ffe27da47..eef568779 100644
--- a/roles/lib_openshift/src/lib/service.py
+++ b/roles/lib_openshift/src/lib/service.py
@@ -67,6 +67,7 @@ class Service(Yedit):
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
+ selector_path = 'spec.selector'
kind = 'Service'
def __init__(self, content):
@@ -77,6 +78,10 @@ class Service(Yedit):
''' get a list of ports '''
return self.get(Service.port_path) or []
+ def get_selector(self):
+ ''' get the service selector'''
+ return self.get(Service.selector_path) or {}
+
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
diff --git a/roles/lib_openshift/src/lib/serviceaccount.py b/roles/lib_openshift/src/lib/serviceaccount.py
index 47a55757e..50c104d44 100644
--- a/roles/lib_openshift/src/lib/serviceaccount.py
+++ b/roles/lib_openshift/src/lib/serviceaccount.py
@@ -18,7 +18,7 @@ class ServiceAccountConfig(object):
self.create_dict()
def create_dict(self):
- ''' return a properly structured volume '''
+        ''' instantiate a properly structured serviceaccount dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'ServiceAccount'
self.data['metadata'] = {}
diff --git a/roles/lib_openshift/src/lib/volume.py b/roles/lib_openshift/src/lib/volume.py
new file mode 100644
index 000000000..e0abb1d1b
--- /dev/null
+++ b/roles/lib_openshift/src/lib/volume.py
@@ -0,0 +1,38 @@
+# pylint: skip-file
+# flake8: noqa
+
+class Volume(object):
+ ''' Class to model an openshift volume object'''
+ volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
+ "dc": "spec.template.spec.containers[0].volumeMounts",
+ "rc": "spec.template.spec.containers[0].volumeMounts",
+ }
+ volumes_path = {"pod": "spec.volumes",
+ "dc": "spec.template.spec.volumes",
+ "rc": "spec.template.spec.volumes",
+ }
+
+ @staticmethod
+ def create_volume_structure(volume_info):
+ ''' return a properly structured volume '''
+ volume_mount = None
+ volume = {'name': volume_info['name']}
+ volume_type = volume_info['type'].lower()
+ if volume_type == 'secret':
+ volume['secret'] = {}
+ volume[volume_info['type']] = {'secretName': volume_info['secret_name']}
+ volume_mount = {'mountPath': volume_info['path'],
+ 'name': volume_info['name']}
+ elif volume_type == 'emptydir':
+ volume['emptyDir'] = {}
+ volume_mount = {'mountPath': volume_info['path'],
+ 'name': volume_info['name']}
+ elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
+ volume['persistentVolumeClaim'] = {}
+ volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
+ volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
+ elif volume_type == 'hostpath':
+ volume['hostPath'] = {}
+ volume['hostPath']['path'] = volume_info['path']
+
+ return (volume, volume_mount)
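+
+
+# A minimal usage sketch (the volume_info dict below is illustrative):
+#
+#   volume, mount = Volume.create_volume_structure(
+#       {'name': 'dockercerts', 'type': 'secret',
+#        'path': '/etc/secrets', 'secret_name': 'registry-secret'})
+#   # volume -> {'name': 'dockercerts', 'secret': {'secretName': 'registry-secret'}}
+#   # mount  -> {'mountPath': '/etc/secrets', 'name': 'dockercerts'}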
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
index 091aaef2e..a48fdf0c2 100644
--- a/roles/lib_openshift/src/sources.yml
+++ b/roles/lib_openshift/src/sources.yml
@@ -1,4 +1,14 @@
---
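+# Each generated module under library/ is assembled by concatenating, in
+# order, the source fragments listed for it here.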
+oc_adm_ca_server_cert.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/ca_server_cert
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_adm_ca_server_cert.py
+- ansible/oc_adm_ca_server_cert.py
+
oadm_manage_node.py:
- doc/generated
- doc/license
@@ -9,6 +19,42 @@ oadm_manage_node.py:
- class/oadm_manage_node.py
- ansible/oadm_manage_node.py
+oc_adm_registry.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/registry
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/deploymentconfig.py
+- lib/secret.py
+- lib/service.py
+- lib/volume.py
+- class/oc_version.py
+- class/oc_adm_registry.py
+- ansible/oc_adm_registry.py
+
+oc_adm_router.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/router
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/service.py
+- lib/deploymentconfig.py
+- lib/serviceaccount.py
+- lib/secret.py
+- lib/rolebinding.py
+- class/oc_adm_router.py
+- ansible/oc_adm_router.py
+
+oc_atomic_container.py:
+- doc/generated
+- doc/license
+- doc/atomic_container
+- ansible/oc_atomic_container.py
+
oc_edit.py:
- doc/generated
- doc/license
@@ -136,3 +182,13 @@ oc_version.py:
- lib/base.py
- class/oc_version.py
- ansible/oc_version.py
+
+oc_sdnvalidator.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/sdnvalidator
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_sdnvalidator.py
+- ansible/oc_sdnvalidator.py
diff --git a/roles/lib_openshift/src/test/unit/oc_sdnvalidator.py b/roles/lib_openshift/src/test/unit/oc_sdnvalidator.py
new file mode 100755
index 000000000..49e2aadb2
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/oc_sdnvalidator.py
@@ -0,0 +1,481 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oc sdnvalidator
+'''
+# To run
+# ./oc_sdnvalidator.py
+#
+# ....
+# ----------------------------------------------------------------------
+# Ran 4 tests in 0.002s
+#
+# OK
+
+import os
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_sdnvalidator import OCSDNValidator # noqa: E402
+
+
+class OCSDNValidatorTest(unittest.TestCase):
+ '''
+ Test class for OCSDNValidator
+ '''
+
+ @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_sdnvalidator.OCSDNValidator._run')
+ def test_no_data(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing when both SDN objects are empty '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ empty = '''{
+ "apiVersion": "v1",
+ "items": [],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (0, empty, ''),
+
+ # Second call to mock
+ (0, empty, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCSDNValidator.run_ansible(params)
+
+ # Assert
+ self.assertNotIn('failed', results)
+ self.assertEqual(results['msg'], 'All SDN objects are valid.')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None),
+ mock.call(['oc', '-n', 'default', 'get', 'netnamespace', '-o', 'json'], None),
+ ])
+
+ @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_sdnvalidator.OCSDNValidator._run')
+ def test_error_code(self, mock_cmd, mock_tmpfile_copy):
+        ''' Testing when we fail to get both SDN objects '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (1, '', 'Error.'),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ error_results = {
+ 'returncode': 1,
+ 'stderr': 'Error.',
+ 'stdout': '',
+ 'cmd': 'oc -n default get hostsubnet -o json',
+ 'results': [{}]
+ }
+
+ # Act
+ results = OCSDNValidator.run_ansible(params)
+
+ # Assert
+ self.assertTrue(results['failed'])
+ self.assertEqual(results['msg'], 'Failed to GET hostsubnet.')
+ self.assertEqual(results['state'], 'list')
+ self.assertEqual(results['results'], error_results)
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None),
+ ])
+
+ @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_sdnvalidator.OCSDNValidator._run')
+ def test_valid_both(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing when both SDN objects are valid '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ valid_hostsubnet = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "host": "bar0",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:09Z",
+ "name": "bar0",
+ "namespace": "",
+ "resourceVersion": "986",
+ "selfLink": "/oapi/v1/hostsubnetsbar0",
+ "uid": "528dbb41-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar1",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:18Z",
+ "name": "bar1",
+ "namespace": "",
+ "resourceVersion": "988",
+ "selfLink": "/oapi/v1/hostsubnetsbar1",
+ "uid": "57710d84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar2",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:26Z",
+ "name": "bar2",
+ "namespace": "",
+ "resourceVersion": "991",
+ "selfLink": "/oapi/v1/hostsubnetsbar2",
+ "uid": "5c59a28c-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+ }'''
+
+ valid_netnamespace = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:16Z",
+ "name": "foo0",
+ "namespace": "",
+ "resourceVersion": "959",
+ "selfLink": "/oapi/v1/netnamespacesfoo0",
+ "uid": "0f1c85b2-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo0"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:26Z",
+ "name": "foo1",
+ "namespace": "",
+ "resourceVersion": "962",
+ "selfLink": "/oapi/v1/netnamespacesfoo1",
+ "uid": "14effa0d-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo1"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:36Z",
+ "name": "foo2",
+ "namespace": "",
+ "resourceVersion": "965",
+ "selfLink": "/oapi/v1/netnamespacesfoo2",
+ "uid": "1aabdf84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo2"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (0, valid_hostsubnet, ''),
+
+ # Second call to mock
+ (0, valid_netnamespace, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCSDNValidator.run_ansible(params)
+
+ # Assert
+ self.assertNotIn('failed', results)
+ self.assertEqual(results['msg'], 'All SDN objects are valid.')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None),
+ mock.call(['oc', '-n', 'default', 'get', 'netnamespace', '-o', 'json'], None),
+ ])
+
+ @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_sdnvalidator.OCSDNValidator._run')
+ def test_invalid_both(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing when both SDN objects are invalid '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ invalid_hostsubnet = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "host": "bar0",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:09Z",
+ "name": "bar0",
+ "namespace": "",
+ "resourceVersion": "986",
+ "selfLink": "/oapi/v1/hostsubnetsbar0",
+ "uid": "528dbb41-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar1",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:18Z",
+ "name": "bar1",
+ "namespace": "",
+ "resourceVersion": "988",
+ "selfLink": "/oapi/v1/hostsubnetsbar1",
+ "uid": "57710d84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar2",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:26Z",
+ "name": "bar2",
+ "namespace": "",
+ "resourceVersion": "991",
+ "selfLink": "/oapi/v1/hostsubnetsbar2",
+ "uid": "5c59a28c-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "baz1",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:49Z",
+ "name": "baz0",
+ "namespace": "",
+ "resourceVersion": "996",
+ "selfLink": "/oapi/v1/hostsubnetsbaz0",
+ "uid": "69f75f87-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ invalid_netnamespace = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:52Z",
+ "name": "bar0",
+ "namespace": "",
+ "resourceVersion": "969",
+ "selfLink": "/oapi/v1/netnamespacesbar0",
+ "uid": "245d416e-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "bar1"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:16Z",
+ "name": "foo0",
+ "namespace": "",
+ "resourceVersion": "959",
+ "selfLink": "/oapi/v1/netnamespacesfoo0",
+ "uid": "0f1c85b2-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo0"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:26Z",
+ "name": "foo1",
+ "namespace": "",
+ "resourceVersion": "962",
+ "selfLink": "/oapi/v1/netnamespacesfoo1",
+ "uid": "14effa0d-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo1"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:36Z",
+ "name": "foo2",
+ "namespace": "",
+ "resourceVersion": "965",
+ "selfLink": "/oapi/v1/netnamespacesfoo2",
+ "uid": "1aabdf84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo2"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ invalid_results = {
+ 'hostsubnets where metadata.name != host': [{
+ 'apiVersion': 'v1',
+ 'host': 'baz1',
+ 'hostIP': '1.1.1.1',
+ 'kind': 'HostSubnet',
+ 'metadata': {
+ 'creationTimestamp': '2017-02-16T18:47:49Z',
+ 'name': 'baz0',
+ 'namespace': '',
+ 'resourceVersion': '996',
+ 'selfLink': '/oapi/v1/hostsubnetsbaz0',
+ 'uid': '69f75f87-f478-11e6-aae0-507b9dac97ff'
+ },
+ 'subnet': '1.1.0.0/24'
+ }],
+ 'netnamespaces where metadata.name != netname': [{
+ 'apiVersion': 'v1',
+ 'kind': 'NetNamespace',
+ 'metadata': {
+ 'creationTimestamp': '2017-02-16T18:45:52Z',
+ 'name': 'bar0',
+ 'namespace': '',
+ 'resourceVersion': '969',
+ 'selfLink': '/oapi/v1/netnamespacesbar0',
+ 'uid': '245d416e-f478-11e6-aae0-507b9dac97ff'
+ },
+ 'netid': 100,
+ 'netname': 'bar1'
+ }],
+ }
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (0, invalid_hostsubnet, ''),
+
+ # Second call to mock
+ (0, invalid_netnamespace, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCSDNValidator.run_ansible(params)
+
+ # Assert
+ self.assertTrue(results['failed'])
+ self.assertEqual(results['msg'], 'All SDN objects are not valid.')
+ self.assertEqual(results['state'], 'list')
+ self.assertEqual(results['results'], invalid_results)
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None),
+ mock.call(['oc', '-n', 'default', 'get', 'netnamespace', '-o', 'json'], None),
+ ])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/oc_secret.py b/roles/lib_openshift/src/test/unit/oc_secret.py
deleted file mode 100755
index 645aac82b..000000000
--- a/roles/lib_openshift/src/test/unit/oc_secret.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env python2
-'''
- Unit tests for oc secret
-'''
-# To run:
-# ./oc_secret.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
-
-import os
-import sys
-import unittest
-import mock
-
-# Removing invalid variable names for tests so that I can
-# keep them brief
-# pylint: disable=invalid-name,no-name-in-module
-# Disable import-error b/c our libraries aren't loaded in jenkins
-# pylint: disable=import-error,wrong-import-position
-# place class in our python path
-module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
-sys.path.insert(0, module_path)
-from oc_secret import OCSecret # noqa: E402
-
-
-class OCSecretTest(unittest.TestCase):
- '''
- Test class for OCSecret
- '''
-
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
- @mock.patch('oc_secret.Utils.create_tmpfile_copy')
- @mock.patch('oc_secret.Utils._write')
- @mock.patch('oc_secret.OCSecret._run')
- def test_adding_a_secret(self, mock_cmd, mock_write, mock_tmpfile_copy):
- ''' Testing adding a secret '''
-
- # Arrange
-
- # run_ansible input parameters
- params = {
- 'state': 'present',
- 'namespace': 'default',
- 'name': 'testsecretname',
- 'contents': [{
- 'path': "/tmp/somesecret.json",
- 'data': "{'one': 1, 'two': 2, 'three': 3}",
- }],
- 'decode': False,
- 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
- 'debug': False,
- 'files': None,
- 'delete_after': True,
- }
-
- # Return values of our mocked function call. These get returned once per call.
- mock_cmd.side_effect = [
- (1, '', 'Error from server: secrets "testsecretname" not found'),
- (0, 'secret/testsecretname', ''),
- ]
-
- mock_tmpfile_copy.side_effect = [
- '/tmp/mocked_kubeconfig',
- ]
-
- # Act
- results = OCSecret.run_ansible(params, False)
-
- # Assert
- self.assertTrue(results['changed'])
- self.assertEqual(results['results']['returncode'], 0)
- self.assertEqual(results['state'], 'present')
-
- # Making sure our mock was called as we expected
- mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'secrets', 'testsecretname', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'secrets', 'new', 'testsecretname', mock.ANY], None),
- ])
-
- mock_write.assert_has_calls([
- mock.call(mock.ANY, "{'one': 1, 'two': 2, 'three': 3}"),
- ])
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/oc_serviceaccount.py b/roles/lib_openshift/src/test/unit/oc_serviceaccount.py
deleted file mode 100755
index 256b569eb..000000000
--- a/roles/lib_openshift/src/test/unit/oc_serviceaccount.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python2
-'''
- Unit tests for oc serviceaccount
-'''
-# To run:
-# ./oc_serviceaccount.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
-
-import os
-import sys
-import unittest
-import mock
-
-# Removing invalid variable names for tests so that I can
-# keep them brief
-# pylint: disable=invalid-name,no-name-in-module
-# Disable import-error b/c our libraries aren't loaded in jenkins
-# pylint: disable=import-error,wrong-import-position
-# place class in our python path
-module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
-sys.path.insert(0, module_path)
-from oc_serviceaccount import OCServiceAccount # noqa: E402
-
-
-class OCServiceAccountTest(unittest.TestCase):
- '''
- Test class for OCServiceAccount
- '''
-
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
- @mock.patch('oc_serviceaccount.Utils.create_tmpfile_copy')
- @mock.patch('oc_serviceaccount.OCServiceAccount._run')
- def test_adding_a_serviceaccount(self, mock_cmd, mock_tmpfile_copy):
- ''' Testing adding a serviceaccount '''
-
- # Arrange
-
- # run_ansible input parameters
- params = {
- 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
- 'state': 'present',
- 'debug': False,
- 'name': 'testserviceaccountname',
- 'namespace': 'default',
- 'secrets': None,
- 'image_pull_secrets': None,
- }
-
- valid_result_json = '''{
- "kind": "ServiceAccount",
- "apiVersion": "v1",
- "metadata": {
- "name": "testserviceaccountname",
- "namespace": "default",
- "selfLink": "/api/v1/namespaces/default/serviceaccounts/testserviceaccountname",
- "uid": "4d8320c9-e66f-11e6-8edc-0eece8f2ce22",
- "resourceVersion": "328450",
- "creationTimestamp": "2017-01-29T22:07:19Z"
- },
- "secrets": [
- {
- "name": "testserviceaccountname-dockercfg-4lqd0"
- },
- {
- "name": "testserviceaccountname-token-9h0ej"
- }
- ],
- "imagePullSecrets": [
- {
- "name": "testserviceaccountname-dockercfg-4lqd0"
- }
- ]
- }'''
-
- # Return values of our mocked function call. These get returned once per call.
- mock_cmd.side_effect = [
- # First call to mock
- (1, '', 'Error from server: serviceaccounts "testserviceaccountname" not found'),
-
- # Second call to mock
- (0, 'serviceaccount "testserviceaccountname" created', ''),
-
- # Third call to mock
- (0, valid_result_json, ''),
- ]
-
- mock_tmpfile_copy.side_effect = [
- '/tmp/mocked_kubeconfig',
- ]
-
- # Act
- results = OCServiceAccount.run_ansible(params, False)
-
- # Assert
- self.assertTrue(results['changed'])
- self.assertEqual(results['results']['returncode'], 0)
- self.assertEqual(results['state'], 'present')
-
- # Making sure our mock was called as we expected
- mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'testserviceaccountname', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'create', '-f', mock.ANY], None),
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'testserviceaccountname', '-o', 'json'], None),
- ])
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/oc_version.py b/roles/lib_openshift/src/test/unit/oc_version.py
deleted file mode 100755
index 67dea415b..000000000
--- a/roles/lib_openshift/src/test/unit/oc_version.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python2
-'''
- Unit tests for oc version
-'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 1 test in 0.597s
-#
-# OK
-
-import os
-import sys
-import unittest
-import mock
-
-# Removing invalid variable names for tests so that I can
-# keep them brief
-# pylint: disable=invalid-name,no-name-in-module
-# Disable import-error b/c our libraries aren't loaded in jenkins
-# pylint: disable=import-error,wrong-import-position
-# place class in our python path
-module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
-sys.path.insert(0, module_path)
-from oc_version import OCVersion # noqa: E402
-
-
-class OCVersionTest(unittest.TestCase):
- '''
- Test class for OCVersion
- '''
-
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
- @mock.patch('oc_version.Utils.create_tmpfile_copy')
- @mock.patch('oc_version.OCVersion.openshift_cmd')
- def test_get(self, mock_openshift_cmd, mock_tmpfile_copy):
- ''' Testing a get '''
- params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
- 'state': 'list',
- 'debug': False}
-
- mock_openshift_cmd.side_effect = [
- {"cmd": "oc version",
- "results": "oc v3.4.0.39\nkubernetes v1.4.0+776c994\n" +
- "features: Basic-Auth GSSAPI Kerberos SPNEGO\n\n" +
- "Server https://internal.api.opstest.openshift.com" +
- "openshift v3.4.0.39\n" +
- "kubernetes v1.4.0+776c994\n",
- "returncode": 0}
- ]
-
- mock_tmpfile_copy.side_effect = [
- '/tmp/mocked_kubeconfig',
- ]
-
- results = OCVersion.run_ansible(params)
-
- self.assertFalse(results['changed'])
- self.assertEqual(results['results']['oc_short'], '3.4')
- self.assertEqual(results['results']['oc_numeric'], '3.4.0.39')
- self.assertEqual(results['results']['kubernetes_numeric'], '1.4.0')
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/oadm_manage_node.py b/roles/lib_openshift/src/test/unit/test_oadm_manage_node.py
index b0786dfac..761c849fb 100755
--- a/roles/lib_openshift/src/test/unit/oadm_manage_node.py
+++ b/roles/lib_openshift/src/test/unit/test_oadm_manage_node.py
@@ -11,6 +11,7 @@
# OK
import os
+import six
import sys
import unittest
import mock
@@ -23,7 +24,7 @@ import mock
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
-from oadm_manage_node import ManageNode # noqa: E402
+from oadm_manage_node import ManageNode, locate_oc_binary # noqa: E402
class ManageNodeTest(unittest.TestCase):
@@ -179,6 +180,114 @@ class ManageNodeTest(unittest.TestCase):
self.assertEqual(results['results']['nodes'][0]['name'], 'ip-172-31-49-140.ec2.internal')
self.assertEqual(results['results']['nodes'][0]['schedulable'], False)
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
def tearDown(self):
'''TearDown method'''
pass
diff --git a/roles/lib_openshift/src/test/unit/oc_env.py b/roles/lib_openshift/src/test/unit/test_oc_env.py
index 15bd7e464..45a3ef1eb 100755
--- a/roles/lib_openshift/src/test/unit/oc_env.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_env.py
@@ -11,6 +11,7 @@
# OK
import os
+import six
import sys
import unittest
import mock
@@ -23,7 +24,7 @@ import mock
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
-from oc_env import OCEnv # noqa: E402
+from oc_env import OCEnv, locate_oc_binary # noqa: E402
class OCEnvTest(unittest.TestCase):
@@ -35,9 +36,10 @@ class OCEnvTest(unittest.TestCase):
''' setup method will create a file and set to known configuration '''
pass
+ @mock.patch('oc_env.locate_oc_binary')
@mock.patch('oc_env.Utils.create_tmpfile_copy')
@mock.patch('oc_env.OCEnv._run')
- def test_listing_all_env_vars(self, mock_cmd, mock_tmpfile_copy):
+ def test_listing_all_env_vars(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing listing all environment variables from a dc'''
# Arrange
@@ -123,6 +125,10 @@ class OCEnvTest(unittest.TestCase):
(0, dc_results, ''), # First call to the mock
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
mock_tmpfile_copy.side_effect = [
'/tmp/mock_adminkubeconfig',
]
@@ -141,12 +147,13 @@ class OCEnvTest(unittest.TestCase):
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'dc', 'router', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_env.locate_oc_binary')
@mock.patch('oc_env.Utils.create_tmpfile_copy')
@mock.patch('oc_env.OCEnv._run')
- def test_adding_env_vars(self, mock_cmd, mock_tmpfile_copy):
+ def test_adding_env_vars(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Test add environment variables to a dc'''
# Arrange
@@ -304,6 +311,10 @@ class OCEnvTest(unittest.TestCase):
(0, dc_results_after, ''),
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
mock_tmpfile_copy.side_effect = [
'/tmp/mock_adminkubeconfig',
]
@@ -322,12 +333,13 @@ class OCEnvTest(unittest.TestCase):
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'dc', 'router', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_env.locate_oc_binary')
@mock.patch('oc_env.Utils.create_tmpfile_copy')
@mock.patch('oc_env.OCEnv._run')
- def test_removing_env_vars(self, mock_cmd, mock_tmpfile_copy):
+ def test_removing_env_vars(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Test add environment variables to a dc'''
# Arrange
@@ -419,6 +431,10 @@ class OCEnvTest(unittest.TestCase):
(0, '', ''),
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
mock_tmpfile_copy.side_effect = [
'/tmp/mock_adminkubeconfig',
]
@@ -432,9 +448,117 @@ class OCEnvTest(unittest.TestCase):
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'dc', 'router', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None),
])
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
def tearDown(self):
'''TearDown method'''
pass
diff --git a/roles/lib_openshift/src/test/unit/oc_label.py b/roles/lib_openshift/src/test/unit/test_oc_label.py
index 3176987b0..933b5f221 100755
--- a/roles/lib_openshift/src/test/unit/oc_label.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_label.py
@@ -11,6 +11,7 @@
# OK
import os
+import six
import sys
import unittest
import mock
@@ -23,7 +24,7 @@ import mock
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
-from oc_label import OCLabel # noqa: E402
+from oc_label import OCLabel, locate_oc_binary # noqa: E402
class OCLabelTest(unittest.TestCase):
@@ -187,6 +188,114 @@ class OCLabelTest(unittest.TestCase):
self.assertTrue(results['results']['results']['labels'][0] ==
{'storage_pv_quota': 'False', 'awesomens': 'testinglabel'})
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
def tearDown(self):
'''TearDown method'''
pass
diff --git a/roles/lib_openshift/src/test/unit/oc_process.py b/roles/lib_openshift/src/test/unit/test_oc_process.py
index 450ff7071..c4b36928b 100755
--- a/roles/lib_openshift/src/test/unit/oc_process.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_process.py
@@ -11,6 +11,7 @@
# OK
import os
+import six
import sys
import unittest
import mock
@@ -23,7 +24,7 @@ import mock
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
-from oc_process import OCProcess # noqa: E402
+from oc_process import OCProcess, locate_oc_binary # noqa: E402
# pylint: disable=too-many-public-methods
@@ -474,6 +475,114 @@ class OCProcessTest(unittest.TestCase):
self.assertFalse(results['changed'])
self.assertEqual(results['results']['results']['items'][0]['metadata']['name'], 'testdb')
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
def tearDown(self):
'''TearDown method'''
pass
diff --git a/roles/lib_openshift/src/test/unit/oc_route.py b/roles/lib_openshift/src/test/unit/test_oc_route.py
index fcfa88cbf..e0f6d2f3c 100755
--- a/roles/lib_openshift/src/test/unit/oc_route.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_route.py
@@ -11,6 +11,7 @@
# OK
import os
+import six
import sys
import unittest
import mock
@@ -23,7 +24,7 @@ import mock
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
-from oc_route import OCRoute # noqa: E402
+from oc_route import OCRoute, locate_oc_binary # noqa: E402
class OCRouteTest(unittest.TestCase):
@@ -35,8 +36,10 @@ class OCRouteTest(unittest.TestCase):
''' setup method will create a file and set to known configuration '''
pass
+ @mock.patch('oc_route.locate_oc_binary')
+ @mock.patch('oc_route.Utils.create_tmpfile_copy')
@mock.patch('oc_route.OCRoute._run')
- def test_list_route(self, mock_cmd):
+ def test_list_route(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing getting a route '''
# Arrange
@@ -114,6 +117,14 @@ class OCRouteTest(unittest.TestCase):
(0, route_result, ''),
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mock.kubeconfig',
+ ]
+
# Act
results = OCRoute.run_ansible(params, False)
@@ -124,14 +135,15 @@ class OCRouteTest(unittest.TestCase):
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'route', 'test', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_route.locate_oc_binary')
+ @mock.patch('oc_route.Utils.create_tmpfile_copy')
@mock.patch('oc_route.Yedit._write')
@mock.patch('oc_route.OCRoute._run')
- def test_create_route(self, mock_cmd, mock_write):
+ def test_create_route(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_oc_binary):
        ''' Testing creating a route '''
-
# Arrange
# run_ansible input parameters
@@ -230,6 +242,14 @@ metadata:
(0, route_result, ''),
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mock.kubeconfig',
+ ]
+
mock_write.assert_has_calls = [
# First call to mock
mock.call('/tmp/test', test_route)
@@ -245,10 +265,119 @@ metadata:
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'route', 'test', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'create', '-f', '/tmp/test'], None),
+ mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None),
])
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
def tearDown(self):
'''TearDown method'''
pass
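
One detail worth noting in the reworked assertions above: the '-f' argument to
`oc create` is now a generated tempfile whose name differs on every run, so
the expected call uses mock.ANY, which compares equal to any value. A small
illustration (the tempfile path here is invented):

import mock

generated = mock.call(['oc', 'create', '-f', '/tmp/tmpXYZ123', '-n', 'default'], None)
expected = mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None)
assert generated == expected  # mock.ANY matches the unpredictable path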
diff --git a/roles/lib_openshift/src/test/unit/oc_scale.py b/roles/lib_openshift/src/test/unit/test_oc_scale.py
index f15eb164d..b2dec2fbe 100755
--- a/roles/lib_openshift/src/test/unit/oc_scale.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_scale.py
@@ -11,6 +11,7 @@
# OK
import os
+import six
import sys
import unittest
import mock
@@ -23,7 +24,7 @@ import mock
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
-from oc_scale import OCScale # noqa: E402
+from oc_scale import OCScale, locate_oc_binary # noqa: E402
class OCScaleTest(unittest.TestCase):
@@ -158,6 +159,114 @@ class OCScaleTest(unittest.TestCase):
self.assertTrue(results['failed'])
self.assertEqual(results['msg']['returncode'], 1)
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
def tearDown(self):
'''TearDown method'''
pass
diff --git a/roles/lib_openshift/src/test/unit/test_oc_secret.py b/roles/lib_openshift/src/test/unit/test_oc_secret.py
new file mode 100755
index 000000000..bf496769a
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_secret.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oc secret
+'''
+# To run:
+# ./test_oc_secret.py
+#
+# .
+# Ran 1 test in 0.002s
+#
+# OK
+
+import os
+import six
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_secret import OCSecret, locate_oc_binary # noqa: E402
+
+
+class OCSecretTest(unittest.TestCase):
+ '''
+ Test class for OCSecret
+ '''
+
+ def setUp(self):
+ ''' setup method will create a file and set to known configuration '''
+ pass
+
+ @mock.patch('oc_secret.locate_oc_binary')
+ @mock.patch('oc_secret.Utils.create_tmpfile_copy')
+ @mock.patch('oc_secret.Utils._write')
+ @mock.patch('oc_secret.OCSecret._run')
+ def test_adding_a_secret(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_oc_binary):
+ ''' Testing adding a secret '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'state': 'present',
+ 'namespace': 'default',
+ 'name': 'testsecretname',
+ 'contents': [{
+ 'path': "/tmp/somesecret.json",
+ 'data': "{'one': 1, 'two': 2, 'three': 3}",
+ }],
+ 'decode': False,
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False,
+ 'files': None,
+ 'delete_after': True,
+ }
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: secrets "testsecretname" not found'),
+ (0, 'secret/testsecretname', ''),
+ ]
+
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCSecret.run_ansible(params, False)
+
+ # Assert
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['returncode'], 0)
+ self.assertEqual(results['state'], 'present')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'secrets', 'testsecretname', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'secrets', 'new', 'testsecretname', mock.ANY, '-n', 'default'], None),
+ ])
+
+ mock_write.assert_has_calls([
+ mock.call(mock.ANY, "{'one': 1, 'two': 2, 'three': 3}"),
+ ])
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ def tearDown(self):
+ '''TearDown method'''
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/oc_service.py b/roles/lib_openshift/src/test/unit/test_oc_service.py
index 4a845e9f3..8974eb6c6 100755
--- a/roles/lib_openshift/src/test/unit/oc_service.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_service.py
@@ -11,6 +11,7 @@
# OK
import os
+import six
import sys
import unittest
import mock
@@ -23,7 +24,7 @@ import mock
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
-from oc_service import OCService # noqa: E402
+from oc_service import OCService, locate_oc_binary # noqa: E402
# pylint: disable=too-many-public-methods
@@ -207,6 +208,114 @@ class OCServiceTest(unittest.TestCase):
self.assertTrue(results['results']['returncode'] == 0)
self.assertEqual(results['results']['results'][0]['metadata']['name'], 'router')
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
def tearDown(self):
'''TearDown method'''
pass
diff --git a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py
new file mode 100755
index 000000000..3572a6728
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py
@@ -0,0 +1,233 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oc serviceaccount
+'''
+# To run:
+# ./test_oc_serviceaccount.py
+#
+# .
+# Ran 1 test in 0.002s
+#
+# OK
+
+import os
+import six
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_serviceaccount import OCServiceAccount, locate_oc_binary # noqa: E402
+
+
+class OCServiceAccountTest(unittest.TestCase):
+ '''
+ Test class for OCServiceAccount
+ '''
+
+ def setUp(self):
+ ''' setup method will create a file and set to known configuration '''
+ pass
+
+ @mock.patch('oc_serviceaccount.locate_oc_binary')
+ @mock.patch('oc_serviceaccount.Utils.create_tmpfile_copy')
+ @mock.patch('oc_serviceaccount.OCServiceAccount._run')
+ def test_adding_a_serviceaccount(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
+ ''' Testing adding a serviceaccount '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'state': 'present',
+ 'debug': False,
+ 'name': 'testserviceaccountname',
+ 'namespace': 'default',
+ 'secrets': None,
+ 'image_pull_secrets': None,
+ }
+
+ valid_result_json = '''{
+ "kind": "ServiceAccount",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "testserviceaccountname",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/serviceaccounts/testserviceaccountname",
+ "uid": "4d8320c9-e66f-11e6-8edc-0eece8f2ce22",
+ "resourceVersion": "328450",
+ "creationTimestamp": "2017-01-29T22:07:19Z"
+ },
+ "secrets": [
+ {
+ "name": "testserviceaccountname-dockercfg-4lqd0"
+ },
+ {
+ "name": "testserviceaccountname-token-9h0ej"
+ }
+ ],
+ "imagePullSecrets": [
+ {
+ "name": "testserviceaccountname-dockercfg-4lqd0"
+ }
+ ]
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (1, '', 'Error from server: serviceaccounts "testserviceaccountname" not found'),
+
+ # Second call to mock
+ (0, 'serviceaccount "testserviceaccountname" created', ''),
+
+ # Third call to mock
+ (0, valid_result_json, ''),
+ ]
+
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCServiceAccount.run_ansible(params, False)
+
+ # Assert
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['returncode'], 0)
+ self.assertEqual(results['state'], 'present')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'sa', 'testserviceaccountname', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'get', 'sa', 'testserviceaccountname', '-o', 'json', '-n', 'default'], None),
+ ])
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ def tearDown(self):
+ '''TearDown method'''
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/oc_serviceaccount_secret.py b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py
index 4d555d412..d78349e34 100755
--- a/roles/lib_openshift/src/test/unit/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py
@@ -11,6 +11,7 @@
# OK
import os
+import six
import sys
import unittest
import mock
@@ -23,7 +24,13 @@ import mock
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
-from oc_serviceaccount_secret import OCServiceAccountSecret # noqa: E402
+from oc_serviceaccount_secret import OCServiceAccountSecret, locate_oc_binary # noqa: E402
+
+try:
+    import ruamel.yaml as yaml  # noqa: F401
+ YAML_TYPE = 'ruamel'
+except ImportError:
+ YAML_TYPE = 'pyyaml'
class OCServiceAccountSecretTest(unittest.TestCase):
@@ -35,10 +42,11 @@ class OCServiceAccountSecretTest(unittest.TestCase):
''' setup method will create a file and set to known configuration '''
pass
+ @mock.patch('oc_serviceaccount_secret.locate_oc_binary')
@mock.patch('oc_serviceaccount_secret.Utils.create_tmpfile_copy')
@mock.patch('oc_serviceaccount_secret.Yedit._write')
@mock.patch('oc_serviceaccount_secret.OCServiceAccountSecret._run')
- def test_adding_a_secret_to_a_serviceaccount(self, mock_cmd, mock_write, mock_tmpfile_copy):
+ def test_adding_a_secret_to_a_serviceaccount(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_oc_binary):
''' Testing adding a secret to a service account '''
# Arrange
@@ -54,8 +62,13 @@ class OCServiceAccountSecretTest(unittest.TestCase):
}
oc_get_sa_before = '''{
- "kind": "ServiceAccount",
"apiVersion": "v1",
+ "imagePullSecrets": [
+ {
+ "name": "builder-dockercfg-rsrua"
+ }
+ ],
+ "kind": "ServiceAccount",
"metadata": {
"name": "builder",
"namespace": "default",
@@ -72,18 +85,18 @@ class OCServiceAccountSecretTest(unittest.TestCase):
"name": "builder-token-akqxi"
}
- ],
- "imagePullSecrets": [
- {
- "name": "builder-dockercfg-rsrua"
- }
]
}
'''
oc_get_sa_after = '''{
- "kind": "ServiceAccount",
"apiVersion": "v1",
+ "imagePullSecrets": [
+ {
+ "name": "builder-dockercfg-rsrua"
+ }
+ ],
+ "kind": "ServiceAccount",
"metadata": {
"name": "builder",
"namespace": "default",
@@ -103,16 +116,10 @@ class OCServiceAccountSecretTest(unittest.TestCase):
"name": "newsecret"
}
- ],
- "imagePullSecrets": [
- {
- "name": "builder-dockercfg-rsrua"
- }
]
}
'''
-
- builder_yaml_file = '''\
+ builder_ryaml_file = '''\
secrets:
- name: builder-dockercfg-rsrua
- name: builder-token-akqxi
@@ -130,6 +137,24 @@ metadata:
uid: cf47bca7-ebc4-11e6-b041-0ed9df7abc38
'''
+ builder_pyyaml_file = '''\
+apiVersion: v1
+imagePullSecrets:
+- name: builder-dockercfg-rsrua
+kind: ServiceAccount
+metadata:
+ creationTimestamp: '2017-02-05T17:02:00Z'
+ name: builder
+ namespace: default
+ resourceVersion: '302879'
+ selfLink: /api/v1/namespaces/default/serviceaccounts/builder
+ uid: cf47bca7-ebc4-11e6-b041-0ed9df7abc38
+secrets:
+- name: builder-dockercfg-rsrua
+- name: builder-token-akqxi
+- name: newsecret
+'''
+
# Return values of our mocked function call. These get returned once per call.
mock_cmd.side_effect = [
(0, oc_get_sa_before, ''), # First call to the mock
@@ -138,6 +163,10 @@ metadata:
(0, oc_get_sa_after, ''), # Fourth call to the mock
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
@@ -152,20 +181,25 @@ metadata:
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'replace', '-f', mock.ANY], None),
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None)
+ mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'replace', '-f', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None)
])
+ yaml_file = builder_pyyaml_file
+
+ if YAML_TYPE == 'ruamel':
+ yaml_file = builder_ryaml_file
mock_write.assert_has_calls([
- mock.call(mock.ANY, builder_yaml_file)
+ mock.call(mock.ANY, yaml_file)
])
+ @mock.patch('oc_serviceaccount_secret.locate_oc_binary')
@mock.patch('oc_serviceaccount_secret.Utils.create_tmpfile_copy')
@mock.patch('oc_serviceaccount_secret.Yedit._write')
@mock.patch('oc_serviceaccount_secret.OCServiceAccountSecret._run')
- def test_removing_a_secret_to_a_serviceaccount(self, mock_cmd, mock_write, mock_tmpfile_copy):
+ def test_removing_a_secret_to_a_serviceaccount(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_oc_binary):
        ''' Testing removing a secret from a service account '''
# Arrange
@@ -181,8 +215,13 @@ metadata:
}
oc_get_sa_before = '''{
- "kind": "ServiceAccount",
"apiVersion": "v1",
+ "imagePullSecrets": [
+ {
+ "name": "builder-dockercfg-rsrua"
+ }
+ ],
+ "kind": "ServiceAccount",
"metadata": {
"name": "builder",
"namespace": "default",
@@ -202,16 +241,11 @@ metadata:
"name": "newsecret"
}
- ],
- "imagePullSecrets": [
- {
- "name": "builder-dockercfg-rsrua"
- }
]
}
'''
- builder_yaml_file = '''\
+ builder_ryaml_file = '''\
secrets:
- name: builder-dockercfg-rsrua
- name: builder-token-akqxi
@@ -228,6 +262,23 @@ metadata:
uid: cf47bca7-ebc4-11e6-b041-0ed9df7abc38
'''
+ builder_pyyaml_file = '''\
+apiVersion: v1
+imagePullSecrets:
+- name: builder-dockercfg-rsrua
+kind: ServiceAccount
+metadata:
+ creationTimestamp: '2017-02-05T17:02:00Z'
+ name: builder
+ namespace: default
+ resourceVersion: '302879'
+ selfLink: /api/v1/namespaces/default/serviceaccounts/builder
+ uid: cf47bca7-ebc4-11e6-b041-0ed9df7abc38
+secrets:
+- name: builder-dockercfg-rsrua
+- name: builder-token-akqxi
+'''
+
# Return values of our mocked function call. These get returned once per call.
mock_cmd.side_effect = [
(0, oc_get_sa_before, ''), # First call to the mock
@@ -235,6 +286,10 @@ metadata:
(0, 'serviceaccount "builder" replaced', ''), # Third call to the mock
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
@@ -249,15 +304,127 @@ metadata:
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'get', 'sa', 'builder', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'replace', '-f', mock.ANY], None),
+ mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'replace', '-f', mock.ANY, '-n', 'default'], None),
])
+ yaml_file = builder_pyyaml_file
+
+ if YAML_TYPE == 'ruamel':
+ yaml_file = builder_ryaml_file
mock_write.assert_has_calls([
- mock.call(mock.ANY, builder_yaml_file)
+ mock.call(mock.ANY, yaml_file)
])
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
def tearDown(self):
'''TearDown method'''
pass
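
The paired builder_ryaml_file / builder_pyyaml_file fixtures above exist
because the two YAML backends serialize differently: ruamel's round-trip
dumper preserves the document's original key order, while PyYAML's safe_dump
emits mapping keys alphabetically — hence the YAML_TYPE switch before the
mock_write assertion. A quick demonstration, assuming stock PyYAML defaults:

import yaml  # PyYAML

doc = {'kind': 'ServiceAccount', 'apiVersion': 'v1'}
print(yaml.safe_dump(doc, default_flow_style=False))
# apiVersion: v1          <- keys come out sorted, not in insertion order
# kind: ServiceAccount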
diff --git a/roles/lib_openshift/src/test/unit/test_oc_version.py b/roles/lib_openshift/src/test/unit/test_oc_version.py
new file mode 100755
index 000000000..6daf5b00d
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_version.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oc version
+'''
+# To run:
+# python -m unittest test_oc_version
+#
+# .
+# Ran 1 test in 0.597s
+#
+# OK
+
+import os
+import six
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_version import OCVersion, locate_oc_binary # noqa: E402
+
+
+class OCVersionTest(unittest.TestCase):
+ '''
+ Test class for OCVersion
+ '''
+
+ def setUp(self):
+ ''' setup method will create a file and set to known configuration '''
+ pass
+
+ @mock.patch('oc_version.Utils.create_tmpfile_copy')
+ @mock.patch('oc_version.OCVersion.openshift_cmd')
+ def test_get(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing a get '''
+ params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'state': 'list',
+ 'debug': False}
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": "oc version",
+ "results": "oc v3.4.0.39\nkubernetes v1.4.0+776c994\n" +
+ "features: Basic-Auth GSSAPI Kerberos SPNEGO\n\n" +
+ "Server https://internal.api.opstest.openshift.com" +
+ "openshift v3.4.0.39\n" +
+ "kubernetes v1.4.0+776c994\n",
+ "returncode": 0}
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCVersion.run_ansible(params)
+
+ self.assertFalse(results['changed'])
+ self.assertEqual(results['results']['oc_short'], '3.4')
+ self.assertEqual(results['results']['oc_numeric'], '3.4.0.39')
+ self.assertEqual(results['results']['kubernetes_numeric'], '1.4.0')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ def tearDown(self):
+ '''TearDown method'''
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/lib_openshift/tasks/main.yml b/roles/lib_openshift/tasks/main.yml
index 2980c8a8d..b8af7c7c9 100644
--- a/roles/lib_openshift/tasks/main.yml
+++ b/roles/lib_openshift/tasks/main.yml
@@ -1,5 +1,11 @@
---
+- name: lib_openshift detect ostree
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
+
- name: lib_openshift ensure python-ruamel-yaml package is on target
package:
- name: python-ruamel-yaml
+ name: python2-ruamel-yaml
state: present
+ when: not ostree_booted.stat.exists
diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py
index 7f0105290..ee98470b0 100644
--- a/roles/lib_utils/library/repoquery.py
+++ b/roles/lib_utils/library/repoquery.py
@@ -29,13 +29,17 @@
# pylint: disable=wrong-import-order,wrong-import-position,unused-import
from __future__ import print_function # noqa: F401
+import copy # noqa: F401
import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
-# pylint: disable=import-error
-import ruamel.yaml as yaml # noqa: F401
import shutil # noqa: F401
+try:
+ import ruamel.yaml as yaml # noqa: F401
+except ImportError:
+ import yaml # noqa: F401
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -472,7 +476,7 @@ class Repoquery(RepoqueryCLI):
''' Gather and present the versions of each package '''
versions_dict = {}
- versions_dict['available_versions_full'] = formatted_versions.keys()
+ versions_dict['available_versions_full'] = list(formatted_versions.keys())
# set the match version, if called
if self.match_version:
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index 1c74b4d3f..b1d9d6869 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -29,13 +29,17 @@
# pylint: disable=wrong-import-order,wrong-import-position,unused-import
from __future__ import print_function # noqa: F401
+import copy # noqa: F401
import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
-# pylint: disable=import-error
-import ruamel.yaml as yaml # noqa: F401
import shutil # noqa: F401
+try:
+ import ruamel.yaml as yaml # noqa: F401
+except ImportError:
+ import yaml # noqa: F401
+
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
@@ -181,6 +185,7 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/yedit -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -375,11 +380,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -419,10 +430,18 @@
# check if it is yaml
try:
if content_type == 'yaml' and contents:
- self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
-                self.yaml_dict.fa.set_block_style()
+            # Try to use RoundTripLoader if supported.
+            try:
+                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+            except AttributeError:
+                self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -451,14 +476,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -526,7 +553,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -539,7 +568,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -548,7 +578,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -587,12 +618,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -605,11 +644,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
diff --git a/roles/lib_utils/meta/main.yml b/roles/lib_utils/meta/main.yml
new file mode 100644
index 000000000..e06b9a0f1
--- /dev/null
+++ b/roles/lib_utils/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: TODO
+ description: OpenShift Repositories
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- { role: openshift_repos }
diff --git a/roles/lib_utils/src/class/repoquery.py b/roles/lib_utils/src/class/repoquery.py
index 2447719e2..82adcada5 100644
--- a/roles/lib_utils/src/class/repoquery.py
+++ b/roles/lib_utils/src/class/repoquery.py
@@ -60,7 +60,7 @@ class Repoquery(RepoqueryCLI):
''' Gather and present the versions of each package '''
versions_dict = {}
- versions_dict['available_versions_full'] = formatted_versions.keys()
+ versions_dict['available_versions_full'] = list(formatted_versions.keys())
# set the match version, if called
if self.match_version:
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
index 8542fe5c7..74ee52fe3 100644
--- a/roles/lib_utils/src/class/yedit.py
+++ b/roles/lib_utils/src/class/yedit.py
@@ -1,5 +1,5 @@
# flake8: noqa
-# pylint: skip-file
+# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
@@ -194,11 +194,17 @@ class Yedit(object):
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to set format attributes if supported
+ try:
self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
- Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
@@ -238,10 +244,24 @@ class Yedit(object):
# check if it is yaml
try:
if content_type == 'yaml' and contents:
-                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
-                # pylint: disable=no-member
-                if hasattr(self.yaml_dict, 'fa'):
-                    self.yaml_dict.fa.set_block_style()
+                # Use the round-trip loader when ruamel.yaml is present;
+                # plain PyYAML has no RoundTripLoader, which raises
+                # AttributeError and selects the safe_load fallback.
+                try:
+                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+                except AttributeError:
+                    self.yaml_dict = yaml.safe_load(contents)
+
+                # Try to set format attributes if supported
+                try:
+                    self.yaml_dict.fa.set_block_style()
+                except AttributeError:
+                    pass
+
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
@@ -270,14 +290,16 @@ class Yedit(object):
return (False, self.yaml_dict)
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
@@ -345,7 +367,9 @@ class Yedit(object):
if not isinstance(entry, list):
return (False, self.yaml_dict)
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
@@ -358,7 +382,8 @@ class Yedit(object):
entry = None
if isinstance(entry, dict):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
@@ -367,7 +392,8 @@ class Yedit(object):
return (True, self.yaml_dict)
elif isinstance(entry, list):
- # pylint: disable=no-member,maybe-no-member
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
@@ -406,12 +432,20 @@ class Yedit(object):
return (False, self.yaml_dict)
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
- default_flow_style=False),
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
@@ -424,11 +458,20 @@ class Yedit(object):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
- tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
- yaml.RoundTripLoader)
- # pylint: disable=no-member
- if hasattr(self.yaml_dict, 'fa'):
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py
index d892353a1..b0ab7c92c 100644
--- a/roles/lib_utils/src/lib/import.py
+++ b/roles/lib_utils/src/lib/import.py
@@ -4,11 +4,15 @@
# pylint: disable=wrong-import-order,wrong-import-position,unused-import
from __future__ import print_function # noqa: F401
+import copy # noqa: F401
import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
-# pylint: disable=import-error
-import ruamel.yaml as yaml # noqa: F401
import shutil # noqa: F401
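+# Fall back to PyYAML when python-ruamel-yaml is not installed on the target.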
+try:
+ import ruamel.yaml as yaml # noqa: F401
+except ImportError:
+ import yaml # noqa: F401
+
from ansible.module_utils.basic import AnsibleModule
diff --git a/roles/lib_utils/src/test/unit/repoquery.py b/roles/lib_utils/src/test/unit/test_repoquery.py
index c487ab254..c487ab254 100755
--- a/roles/lib_utils/src/test/unit/repoquery.py
+++ b/roles/lib_utils/src/test/unit/test_repoquery.py
diff --git a/roles/lib_utils/src/test/unit/yedit_test.py b/roles/lib_utils/src/test/unit/test_yedit.py
index 2793c5c1a..ed07ac96e 100755
--- a/roles/lib_utils/src/test/unit/yedit_test.py
+++ b/roles/lib_utils/src/test/unit/test_yedit.py
@@ -256,7 +256,7 @@ class YeditTest(unittest.TestCase):
def test_pop_list_item_2(self):
'''test dict value with none value'''
- z = range(10)
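+        # list() is required on Python 3, where range() returns a lazy
+        # sequence without a pop() method.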
+ z = list(range(10))
yed = Yedit(content=z, separator=':')
yed.pop('', 5)
z.pop(5)
diff --git a/roles/lib_utils/tasks/main.yml b/roles/lib_utils/tasks/main.yml
index 8a350da88..32ab9e0c6 100644
--- a/roles/lib_utils/tasks/main.yml
+++ b/roles/lib_utils/tasks/main.yml
@@ -1,5 +1,11 @@
---
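+# /run/ostree-booted exists only on Atomic Host (ostree) systems, where
+# RPMs cannot be installed with the package module.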
+- name: lib_utils detect ostree
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
+
- name: lib_utils ensure python-ruamel-yaml package is on target
package:
name: python-ruamel-yaml
state: present
+ when: not ostree_booted.stat.exists
diff --git a/roles/nuage_node/tasks/iptables.yml b/roles/nuage_node/tasks/iptables.yml
index 8e2c29620..847c8395d 100644
--- a/roles/nuage_node/tasks/iptables.yml
+++ b/roles/nuage_node/tasks/iptables.yml
@@ -2,7 +2,7 @@
- name: IPtables | Get iptables rules
command: iptables -L --wait
register: iptablesrules
- always_run: yes
+ check_mode: no
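+  # always_run was deprecated in Ansible 2.2; check_mode: no is its replacement.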
- name: Allow traffic from overlay to underlay
command: /sbin/iptables --wait -I FORWARD 1 -s {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -j ACCEPT -m comment --comment "nuage-overlay-underlay"
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index ae3ad31c3..70c2a9121 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -60,6 +60,7 @@
copy:
src: "{{ item.src }}"
dest: "{{ openshift_ca_config_dir }}/{{ item.dest }}"
+ force: no
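+    # force: no leaves an already existing CA certificate in place on reruns.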
with_items:
- src: "{{ (openshift_master_ca_certificate | default({'certfile':none})).certfile }}"
dest: ca.crt
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index 85671b164..b093d84fe 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -5,13 +5,32 @@
"""For details on this module see DOCUMENTATION (below)"""
import datetime
+import io
import os
import subprocess
+import sys
+import tempfile
+# File pointers from io.open require unicode inputs when using their
+# `write` method
+import six
from six.moves import configparser
import yaml
-import OpenSSL.crypto
+try:
+    # To manually test the fallback path on a host that *does* have
+    # PyOpenSSL installed, comment this import out and put a 'pass'
+    # in this block. That forces the `load_and_handle_cert` function
+    # to use the Fake OpenSSL classes below.
+ import OpenSSL.crypto
+except ImportError:
+ # Some platforms (such as RHEL Atomic) may not have the Python
+ # OpenSSL library installed. In this case we will use a manual
+ # work-around to parse each certificate.
+ #
+ # Check for 'OpenSSL.crypto' in `sys.modules` later.
+ pass
DOCUMENTATION = '''
---
@@ -66,6 +85,128 @@ EXAMPLES = '''
'''
+class FakeOpenSSLCertificate(object):
+ """This provides a rough mock of what you get from
+`OpenSSL.crypto.load_certificate()`. This is a work-around for
+platforms missing the Python OpenSSL library.
+ """
+ def __init__(self, cert_string):
+ """`cert_string` is a certificate in the form you get from running a
+.crt through 'openssl x509 -in CERT.cert -text'"""
+ self.cert_string = cert_string
+ self.serial = None
+ self.subject = None
+ self.extensions = []
+ self.not_after = None
+ self._parse_cert()
+
+ def _parse_cert(self):
+ """Manually parse the certificate line by line"""
+ self.extensions = []
+
+ PARSING_ALT_NAMES = False
+ for line in self.cert_string.split('\n'):
+ l = line.strip()
+ if PARSING_ALT_NAMES:
+ # We're parsing a 'Subject Alternative Name' line
+ self.extensions.append(
+ FakeOpenSSLCertificateSANExtension(l))
+
+ PARSING_ALT_NAMES = False
+ continue
+
+ # parse out the bits that we can
+ if l.startswith('Serial Number:'):
+ # Serial Number: 11 (0xb)
+ # => 11
+ self.serial = int(l.split()[-2])
+
+ elif l.startswith('Not After :'):
+ # Not After : Feb 7 18:19:35 2019 GMT
+ # => strptime(str, '%b %d %H:%M:%S %Y %Z')
+ # => strftime('%Y%m%d%H%M%SZ')
+ # => 20190207181935Z
+ not_after_raw = l.partition(' : ')[-1]
+ # Last item: ('Not After', ' : ', 'Feb 7 18:19:35 2019 GMT')
+ not_after_parsed = datetime.datetime.strptime(not_after_raw, '%b %d %H:%M:%S %Y %Z')
+ self.not_after = not_after_parsed.strftime('%Y%m%d%H%M%SZ')
+
+ elif l.startswith('X509v3 Subject Alternative Name:'):
+ PARSING_ALT_NAMES = True
+ continue
+
+ elif l.startswith('Subject:'):
+ # O=system:nodes, CN=system:node:m01.example.com
+ self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1])
+
+ def get_serial_number(self):
+ """Return the serial number of the cert"""
+ return self.serial
+
+ def get_subject(self):
+        """Subjects must implement get_components(), which returns a list of
+(name, value) tuples. For an 'openssl x509 -in CERT.cert -text' output with
+a 'Subject' line such as:
+
+    Subject: O=system:nodes, CN=system:node:m01.example.com
+
+get_components() returns: [('O', 'system:nodes'), ('CN', 'system:node:m01.example.com')]
+ """
+ return self.subject
+
+ def get_extension(self, i):
+ """Extensions must implement get_short_name() and return the string
+'subjectAltName'"""
+ return self.extensions[i]
+
+ def get_notAfter(self):
+ """Returns a date stamp as a string in the form
+'20180922170439Z'. strptime the result with format param:
+'%Y%m%d%H%M%SZ'."""
+ return self.not_after
+
+
+class FakeOpenSSLCertificateSANExtension(object): # pylint: disable=too-few-public-methods
+ """Mocks what happens when `get_extension` is called on a certificate
+object"""
+
+ def __init__(self, san_string):
+ """With `san_string` as you get from:
+
+ $ openssl x509 -in certificate.crt -text
+ """
+ self.san_string = san_string
+ self.short_name = 'subjectAltName'
+
+ def get_short_name(self):
+ """Return the 'type' of this extension. It's always the same though
+because we only care about subjectAltNames"""
+ return self.short_name
+
+ def __str__(self):
+ """Return this extension and the value as a simple string"""
+ return self.san_string
+
+
+# pylint: disable=too-few-public-methods
+class FakeOpenSSLCertificateSubjects(object):
+ """Mocks what happens when `get_subject` is called on a certificate
+object"""
+
+ def __init__(self, subject_string):
+ """With `subject_string` as you get from:
+
+ $ openssl x509 -in certificate.crt -text
+ """
+ self.subjects = []
+ for s in subject_string.split(', '):
+ name, _, value = s.partition('=')
+ self.subjects.append((name, value))
+
+ def get_components(self):
+ """Returns a list of tuples"""
+ return self.subjects
+
+
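+# Illustrative usage, based on the fixtures added in this commit:
+#
+#   cert = FakeOpenSSLCertificate(open('test/system-node-m01.example.com.crt.txt').read())
+#   cert.get_serial_number()  # -> 11
+#   cert.get_notAfter()       # -> '20190207181935Z'
+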
# We only need this for one thing, we don't care if it doesn't have
# that many public methods
#
@@ -100,7 +241,10 @@ will be returned
return [p for p in path_list if os.path.exists(os.path.realpath(p))]
-def load_and_handle_cert(cert_string, now, base64decode=False):
+# pylint: disable=too-many-locals,too-many-branches
+#
+# TODO: Break this function down into smaller chunks
+def load_and_handle_cert(cert_string, now, base64decode=False, ans_module=None):
"""Load a certificate, split off the good parts, and return some
useful data
@@ -109,20 +253,45 @@ Params:
- `cert_string` (string) - a certificate loaded into a string object
- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
- `base64decode` (bool) - run .decode('base64') on the input?
+- `ans_module` (AnsibleModule) - The AnsibleModule object for this module (so we can raise errors)
Returns:
-A 3-tuple of the form: (certificate_common_name, certificate_expiry_date, certificate_time_remaining)
-
+A tuple of the form:
+ (cert_subject, cert_expiry_date, time_remaining, cert_serial_number)
"""
if base64decode:
_cert_string = cert_string.decode('base-64')
else:
_cert_string = cert_string
- cert_loaded = OpenSSL.crypto.load_certificate(
- OpenSSL.crypto.FILETYPE_PEM, _cert_string)
-
- cert_serial = cert_loaded.get_serial_number()
+ # Disable this. We 'redefine' the type because we are working
+ # around a missing library on the target host.
+ #
+ # pylint: disable=redefined-variable-type
+ if 'OpenSSL.crypto' in sys.modules:
+ # No work-around required
+ cert_loaded = OpenSSL.crypto.load_certificate(
+ OpenSSL.crypto.FILETYPE_PEM, _cert_string)
+ else:
+ # Missing library, work-around required. We need to write the
+ # cert out to disk temporarily so we can run the 'openssl'
+ # command on it to decode it
+ _, path = tempfile.mkstemp()
+ with io.open(path, 'w') as fp:
+ fp.write(six.u(_cert_string))
+ fp.flush()
+
+ cmd = 'openssl x509 -in {} -text'.format(path)
+ try:
+ openssl_decoded = subprocess.Popen(cmd.split(),
+ stdout=subprocess.PIPE)
+ except OSError:
+ ans_module.fail_json(msg="Error: The 'OpenSSL' python library and CLI command were not found on the target host. Unable to parse any certificates. This host will not be included in generated reports.")
+ else:
+ openssl_decoded = openssl_decoded.communicate()[0]
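+            # NOTE: on Python 3 communicate() returns bytes; the output would
+            # need decoding before FakeOpenSSLCertificate can split on '\n'.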
+ cert_loaded = FakeOpenSSLCertificate(openssl_decoded)
+ finally:
+ os.remove(path)
######################################################################
# Read all possible names from the cert
@@ -172,15 +341,14 @@ A 3-tuple of the form: (certificate_common_name, certificate_expiry_date, certif
######################################################################
# Grab the expiration date
- cert_expiry = cert_loaded.get_notAfter()
cert_expiry_date = datetime.datetime.strptime(
- cert_expiry,
+ cert_loaded.get_notAfter(),
# example get_notAfter() => 20180922170439Z
'%Y%m%d%H%M%SZ')
time_remaining = cert_expiry_date - now
- return (cert_subject, cert_expiry_date, time_remaining, cert_serial)
+ return (cert_subject, cert_expiry_date, time_remaining, cert_loaded.get_serial_number())
def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list):
@@ -379,7 +547,7 @@ an OpenShift Container Platform cluster
(cert_subject,
cert_expiry_date,
time_remaining,
- cert_serial) = load_and_handle_cert(cert, now)
+ cert_serial) = load_and_handle_cert(cert, now, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
@@ -428,7 +596,7 @@ an OpenShift Container Platform cluster
(cert_subject,
cert_expiry_date,
time_remaining,
- cert_serial) = load_and_handle_cert(c, now, base64decode=True)
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
@@ -458,7 +626,7 @@ an OpenShift Container Platform cluster
(cert_subject,
cert_expiry_date,
time_remaining,
- cert_serial) = load_and_handle_cert(c, now, base64decode=True)
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
@@ -512,7 +680,7 @@ an OpenShift Container Platform cluster
(cert_subject,
cert_expiry_date,
time_remaining,
- cert_serial) = load_and_handle_cert(c, now)
+ cert_serial) = load_and_handle_cert(c, now, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
@@ -551,7 +719,7 @@ an OpenShift Container Platform cluster
(cert_subject,
cert_expiry_date,
time_remaining,
- cert_serial) = load_and_handle_cert(etcd_fp.read(), now)
+ cert_serial) = load_and_handle_cert(etcd_fp.read(), now, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
@@ -597,7 +765,7 @@ an OpenShift Container Platform cluster
(cert_subject,
cert_expiry_date,
time_remaining,
- cert_serial) = load_and_handle_cert(router_c, now, base64decode=True)
+ cert_serial) = load_and_handle_cert(router_c, now, base64decode=True, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
@@ -628,7 +796,7 @@ an OpenShift Container Platform cluster
(cert_subject,
cert_expiry_date,
time_remaining,
- cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True)
+ cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True, ans_module=module)
expire_check_result = {
'cert_cn': cert_subject,
diff --git a/roles/openshift_certificate_expiry/test/master.server.crt b/roles/openshift_certificate_expiry/test/master.server.crt
new file mode 100644
index 000000000..51aa85c8c
--- /dev/null
+++ b/roles/openshift_certificate_expiry/test/master.server.crt
@@ -0,0 +1,42 @@
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBBDANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+c2hpZnQtc2lnbmVyQDE0ODY0OTExNTgwHhcNMTcwMjA3MTgxMjM5WhcNMTkwMjA3
+MTgxMjQwWjAVMRMwEQYDVQQDEwoxNzIuMzAuMC4xMIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEA44n6kVlnRXSwgnKhXX7JrRvxZm+nCqEE/vpKRfNtrMDP
+AuVtcLUWdEDdT0L7QdceLTCBFe7VugrfokPhVi0XavrC2xFpYJ6+wPpuo7HyBRhf
+z/8rOxftAnMeFU5JhFDaeLwSbDjiRgjE1ZYYz8Hcq9YlPujptD6j6YaW1Inae+Vs
+QKXc1uAobemhClLKazEzccVGu53CaSHe4kJoKUZwJ8Ujt/nRHUr+wekbkpx0NfmF
+UEGgNRXN46cq7ZwkLHsjtuR2pttC6JhF+KHgXTRyWM9ssfvL2McmhTFxrToAlhsq
+8MuHMn0y9DMzmAK6EntvlC5AscxTRljtwHZEicspFwIDAQABo4IBNzCCATMwDgYD
+VR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAw
+gf0GA1UdEQSB9TCB8oIKa3ViZXJuZXRlc4ISa3ViZXJuZXRlcy5kZWZhdWx0ghZr
+dWJlcm5ldGVzLmRlZmF1bHQuc3ZjgiRrdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNs
+dXN0ZXIubG9jYWyCD20wMS5leGFtcGxlLmNvbYIJb3BlbnNoaWZ0ghFvcGVuc2hp
+ZnQuZGVmYXVsdIIVb3BlbnNoaWZ0LmRlZmF1bHQuc3ZjgiNvcGVuc2hpZnQuZGVm
+YXVsdC5zdmMuY2x1c3Rlci5sb2NhbIIKMTcyLjMwLjAuMYIPMTkyLjE2OC4xMjIu
+MjQxhwSsHgABhwTAqHrxMA0GCSqGSIb3DQEBCwUAA4IBAQDSdKBpUVB5Sgn1JB//
+bk804+zrUf01koaT83/17WMI+zG8IOwCZ9Be5+zDe4ThXH+PQC6obbwUi9dn8SN6
+rlihvrhNvAJaknY1YRjW07L7aus2RFKXpzsLuWoWLVlLXBTpmfWhQ2w40bCo4Kri
+jQqvezBQ+u1otFzozWmF7nrI/JK+7o89hLvaobx+mDj5wCPQLO+cM/q11Jcz3htv
+VOTFsMh2VnuKOxZqLGJz2CXkr6YXvAhJiFQWaRCnJEaA2ogTYbDahV5ixFKwqpGZ
+o+yDEroPlCw54Bxs0P1ewUx4TRsqd+Qzhnr73xiFBQ0M7JjhKHF6EczHt87XPvsn
+HEL2
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIC6jCCAdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+c2hpZnQtc2lnbmVyQDE0ODY0OTExNTgwHhcNMTcwMjA3MTgxMjM3WhcNMjIwMjA2
+MTgxMjM4WjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE0ODY0OTExNTgw
+ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDdyU8AD7sTXHP5jk04i1HY
+cmUXuSiXdIByeIAtTZqiHU46Od0qnZib7tY1NSbo9FtGRl5YEvrfNL+1ig0hZjDh
+gKZK4fNbsanuKgj2SWx11bt4yJH0YSbm9+H45y0E15IY1h30jGHnHFNFZDdYwxtO
+8u+GShb4MOqZL9aUEfvfaoGJIIpfR+eW5NaBZQr6tiM89Z1wJYqoqzgzI/XIyUXR
+zWLOayP1M/eeSXPvBncwZfTPLzphZjB2rz3MdloPrdYMm2b5tfbEOjD7L2aYOJJU
+nVSkgjoFXAazL8KuXSIGcdrdDecyJ4ta8ijD4VIZRN9PnBlYiKaz0DsagkGjUVRd
+AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+SIb3DQEBCwUAA4IBAQAZ/Kerb5eJXbByQ29fq60V+MQgLIJ1iTo3+zfaXxGlmV9v
+fTp3S1xQhdGyhww7UC0Ze940eRq6BQ5/I6qPcgSGNpUB064pnjSf0CexCY4qoGqK
+4VSvHRrG9TP5V+YIlX9UR1zuPy//a+wuCwKaqiWedTMb4jpvj5jsEOGIrciSmurg
+/9nKvvJXRbgqRYQeJGLT5QW5clHywsyTrE7oYytYSEcAvEs3UZT37H74wj2RFxk6
+KcEzsxUB3W+iYst0QdOPByt64OCwAaUJ96VJstaOYMmyWSShAxGAKDSjcrr4JJnF
+KtqOC1K56x0ONuBsY4MB15TNGPp8SbOhVV6OfIWj
+-----END CERTIFICATE-----
diff --git a/roles/openshift_certificate_expiry/test/master.server.crt.txt b/roles/openshift_certificate_expiry/test/master.server.crt.txt
new file mode 100644
index 000000000..6b3c8fb03
--- /dev/null
+++ b/roles/openshift_certificate_expiry/test/master.server.crt.txt
@@ -0,0 +1,82 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 4 (0x4)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: CN=openshift-signer@1486491158
+ Validity
+ Not Before: Feb 7 18:12:39 2017 GMT
+ Not After : Feb 7 18:12:40 2019 GMT
+ Subject: CN=172.30.0.1
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:e3:89:fa:91:59:67:45:74:b0:82:72:a1:5d:7e:
+ c9:ad:1b:f1:66:6f:a7:0a:a1:04:fe:fa:4a:45:f3:
+ 6d:ac:c0:cf:02:e5:6d:70:b5:16:74:40:dd:4f:42:
+ fb:41:d7:1e:2d:30:81:15:ee:d5:ba:0a:df:a2:43:
+ e1:56:2d:17:6a:fa:c2:db:11:69:60:9e:be:c0:fa:
+ 6e:a3:b1:f2:05:18:5f:cf:ff:2b:3b:17:ed:02:73:
+ 1e:15:4e:49:84:50:da:78:bc:12:6c:38:e2:46:08:
+ c4:d5:96:18:cf:c1:dc:ab:d6:25:3e:e8:e9:b4:3e:
+ a3:e9:86:96:d4:89:da:7b:e5:6c:40:a5:dc:d6:e0:
+ 28:6d:e9:a1:0a:52:ca:6b:31:33:71:c5:46:bb:9d:
+ c2:69:21:de:e2:42:68:29:46:70:27:c5:23:b7:f9:
+ d1:1d:4a:fe:c1:e9:1b:92:9c:74:35:f9:85:50:41:
+ a0:35:15:cd:e3:a7:2a:ed:9c:24:2c:7b:23:b6:e4:
+ 76:a6:db:42:e8:98:45:f8:a1:e0:5d:34:72:58:cf:
+ 6c:b1:fb:cb:d8:c7:26:85:31:71:ad:3a:00:96:1b:
+ 2a:f0:cb:87:32:7d:32:f4:33:33:98:02:ba:12:7b:
+ 6f:94:2e:40:b1:cc:53:46:58:ed:c0:76:44:89:cb:
+ 29:17
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Key Usage: critical
+ Digital Signature, Key Encipherment
+ X509v3 Extended Key Usage:
+ TLS Web Server Authentication
+ X509v3 Basic Constraints: critical
+ CA:FALSE
+ X509v3 Subject Alternative Name:
+ DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.122.241, IP Address:172.30.0.1, IP Address:192.168.122.241
+ Signature Algorithm: sha256WithRSAEncryption
+ d2:74:a0:69:51:50:79:4a:09:f5:24:1f:ff:6e:4f:34:e3:ec:
+ eb:51:fd:35:92:86:93:f3:7f:f5:ed:63:08:fb:31:bc:20:ec:
+ 02:67:d0:5e:e7:ec:c3:7b:84:e1:5c:7f:8f:40:2e:a8:6d:bc:
+ 14:8b:d7:67:f1:23:7a:ae:58:a1:be:b8:4d:bc:02:5a:92:76:
+ 35:61:18:d6:d3:b2:fb:6a:eb:36:44:52:97:a7:3b:0b:b9:6a:
+ 16:2d:59:4b:5c:14:e9:99:f5:a1:43:6c:38:d1:b0:a8:e0:aa:
+ e2:8d:0a:af:7b:30:50:fa:ed:68:b4:5c:e8:cd:69:85:ee:7a:
+ c8:fc:92:be:ee:8f:3d:84:bb:da:a1:bc:7e:98:38:f9:c0:23:
+ d0:2c:ef:9c:33:fa:b5:d4:97:33:de:1b:6f:54:e4:c5:b0:c8:
+ 76:56:7b:8a:3b:16:6a:2c:62:73:d8:25:e4:af:a6:17:bc:08:
+ 49:88:54:16:69:10:a7:24:46:80:da:88:13:61:b0:da:85:5e:
+ 62:c4:52:b0:aa:91:99:a3:ec:83:12:ba:0f:94:2c:39:e0:1c:
+ 6c:d0:fd:5e:c1:4c:78:4d:1b:2a:77:e4:33:86:7a:fb:df:18:
+ 85:05:0d:0c:ec:98:e1:28:71:7a:11:cc:c7:b7:ce:d7:3e:fb:
+ 27:1c:42:f6
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBBDANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+c2hpZnQtc2lnbmVyQDE0ODY0OTExNTgwHhcNMTcwMjA3MTgxMjM5WhcNMTkwMjA3
+MTgxMjQwWjAVMRMwEQYDVQQDEwoxNzIuMzAuMC4xMIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEA44n6kVlnRXSwgnKhXX7JrRvxZm+nCqEE/vpKRfNtrMDP
+AuVtcLUWdEDdT0L7QdceLTCBFe7VugrfokPhVi0XavrC2xFpYJ6+wPpuo7HyBRhf
+z/8rOxftAnMeFU5JhFDaeLwSbDjiRgjE1ZYYz8Hcq9YlPujptD6j6YaW1Inae+Vs
+QKXc1uAobemhClLKazEzccVGu53CaSHe4kJoKUZwJ8Ujt/nRHUr+wekbkpx0NfmF
+UEGgNRXN46cq7ZwkLHsjtuR2pttC6JhF+KHgXTRyWM9ssfvL2McmhTFxrToAlhsq
+8MuHMn0y9DMzmAK6EntvlC5AscxTRljtwHZEicspFwIDAQABo4IBNzCCATMwDgYD
+VR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAw
+gf0GA1UdEQSB9TCB8oIKa3ViZXJuZXRlc4ISa3ViZXJuZXRlcy5kZWZhdWx0ghZr
+dWJlcm5ldGVzLmRlZmF1bHQuc3ZjgiRrdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNs
+dXN0ZXIubG9jYWyCD20wMS5leGFtcGxlLmNvbYIJb3BlbnNoaWZ0ghFvcGVuc2hp
+ZnQuZGVmYXVsdIIVb3BlbnNoaWZ0LmRlZmF1bHQuc3ZjgiNvcGVuc2hpZnQuZGVm
+YXVsdC5zdmMuY2x1c3Rlci5sb2NhbIIKMTcyLjMwLjAuMYIPMTkyLjE2OC4xMjIu
+MjQxhwSsHgABhwTAqHrxMA0GCSqGSIb3DQEBCwUAA4IBAQDSdKBpUVB5Sgn1JB//
+bk804+zrUf01koaT83/17WMI+zG8IOwCZ9Be5+zDe4ThXH+PQC6obbwUi9dn8SN6
+rlihvrhNvAJaknY1YRjW07L7aus2RFKXpzsLuWoWLVlLXBTpmfWhQ2w40bCo4Kri
+jQqvezBQ+u1otFzozWmF7nrI/JK+7o89hLvaobx+mDj5wCPQLO+cM/q11Jcz3htv
+VOTFsMh2VnuKOxZqLGJz2CXkr6YXvAhJiFQWaRCnJEaA2ogTYbDahV5ixFKwqpGZ
+o+yDEroPlCw54Bxs0P1ewUx4TRsqd+Qzhnr73xiFBQ0M7JjhKHF6EczHt87XPvsn
+HEL2
+-----END CERTIFICATE-----
diff --git a/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt b/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt
new file mode 100644
index 000000000..cd13ddc38
--- /dev/null
+++ b/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDEzCCAfugAwIBAgIBCzANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+c2hpZnQtc2lnbmVyQDE0ODY0OTExNTgwHhcNMTcwMjA3MTgxOTM0WhcNMTkwMjA3
+MTgxOTM1WjA9MRUwEwYDVQQKEwxzeXN0ZW06bm9kZXMxJDAiBgNVBAMTG3N5c3Rl
+bTpub2RlOm0wMS5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOcVdDaSmeXuSp+7VCHUjEDeTP3j9aH0nreBj3079sEzethlLoQmwAqf
+CZp23qXGYm0R89+CC55buaH1FN/ltQ8QDGUzi4tdow9Af/0OcD0EINO2ukmyG5/9
+N+X905mo+y923wppvrchAA6AcxxeDyA63zouGS4exI98iuZlcdS48zbsGERkRPGg
+hoGCo7HoiKtUNL5X8MYibnFYnA4EUngdHZsRKuKte4t8GY4PYq4cxIOYXsJsNmT5
+mkFy4ThGFfR9IGg/VfyeWIkRe2VWyaUgzL0gHytAhlRJ9l54ynx96YEWrjCtp/kh
+d3KeVj0IUcMzvoXX5hipYUPkoezcxI8CAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgWg
+MBMGA1UdJQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQEL
+BQADggEBAM1jexLtuOOTsOPfEal/ICzfP9aX3m0R/yGPjwQv43jOc81NcL5d+CeD
+MX36tKAxFIe+wvXo0kUQOzTK3D7ol4x2YTtB4uDzNE5tVh5dWi2LrKYSqZDIrhKO
+MOmJRWR3AFEopaaGQpxsD/FSfZ5Mg0OMMBPHABxMrsiserHO1nh4ax3+SI0i7Jen
+gVsB4B/Xxg9Lw9JDX3/XMcI+fyLVw5ctO62BaluljpT+HkdbRWnH8ar7TmcJjzTo
+/TyXOeOLkbozx0BQK16d/CbtLomJ+PO4cdwCNs2Z6HGSPTL7S9y0pct52N0kfJfx
+ZGXMsW+N62S2vVSXEekMR0GJgJnLNSo=
+-----END CERTIFICATE-----
diff --git a/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt.txt b/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt.txt
new file mode 100644
index 000000000..67a3eb81c
--- /dev/null
+++ b/roles/openshift_certificate_expiry/test/system-node-m01.example.com.crt.txt
@@ -0,0 +1,75 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 11 (0xb)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: CN=openshift-signer@1486491158
+ Validity
+ Not Before: Feb 7 18:19:34 2017 GMT
+ Not After : Feb 7 18:19:35 2019 GMT
+ Subject: O=system:nodes, CN=system:node:m01.example.com
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:e7:15:74:36:92:99:e5:ee:4a:9f:bb:54:21:d4:
+ 8c:40:de:4c:fd:e3:f5:a1:f4:9e:b7:81:8f:7d:3b:
+ f6:c1:33:7a:d8:65:2e:84:26:c0:0a:9f:09:9a:76:
+ de:a5:c6:62:6d:11:f3:df:82:0b:9e:5b:b9:a1:f5:
+ 14:df:e5:b5:0f:10:0c:65:33:8b:8b:5d:a3:0f:40:
+ 7f:fd:0e:70:3d:04:20:d3:b6:ba:49:b2:1b:9f:fd:
+ 37:e5:fd:d3:99:a8:fb:2f:76:df:0a:69:be:b7:21:
+ 00:0e:80:73:1c:5e:0f:20:3a:df:3a:2e:19:2e:1e:
+ c4:8f:7c:8a:e6:65:71:d4:b8:f3:36:ec:18:44:64:
+ 44:f1:a0:86:81:82:a3:b1:e8:88:ab:54:34:be:57:
+ f0:c6:22:6e:71:58:9c:0e:04:52:78:1d:1d:9b:11:
+ 2a:e2:ad:7b:8b:7c:19:8e:0f:62:ae:1c:c4:83:98:
+ 5e:c2:6c:36:64:f9:9a:41:72:e1:38:46:15:f4:7d:
+ 20:68:3f:55:fc:9e:58:89:11:7b:65:56:c9:a5:20:
+ cc:bd:20:1f:2b:40:86:54:49:f6:5e:78:ca:7c:7d:
+ e9:81:16:ae:30:ad:a7:f9:21:77:72:9e:56:3d:08:
+ 51:c3:33:be:85:d7:e6:18:a9:61:43:e4:a1:ec:dc:
+ c4:8f
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Key Usage: critical
+ Digital Signature, Key Encipherment
+ X509v3 Extended Key Usage:
+ TLS Web Client Authentication
+ X509v3 Basic Constraints: critical
+ CA:FALSE
+ Signature Algorithm: sha256WithRSAEncryption
+ cd:63:7b:12:ed:b8:e3:93:b0:e3:df:11:a9:7f:20:2c:df:3f:
+ d6:97:de:6d:11:ff:21:8f:8f:04:2f:e3:78:ce:73:cd:4d:70:
+ be:5d:f8:27:83:31:7d:fa:b4:a0:31:14:87:be:c2:f5:e8:d2:
+ 45:10:3b:34:ca:dc:3e:e8:97:8c:76:61:3b:41:e2:e0:f3:34:
+ 4e:6d:56:1e:5d:5a:2d:8b:ac:a6:12:a9:90:c8:ae:12:8e:30:
+ e9:89:45:64:77:00:51:28:a5:a6:86:42:9c:6c:0f:f1:52:7d:
+ 9e:4c:83:43:8c:30:13:c7:00:1c:4c:ae:c8:ac:7a:b1:ce:d6:
+ 78:78:6b:1d:fe:48:8d:22:ec:97:a7:81:5b:01:e0:1f:d7:c6:
+ 0f:4b:c3:d2:43:5f:7f:d7:31:c2:3e:7f:22:d5:c3:97:2d:3b:
+ ad:81:6a:5b:a5:8e:94:fe:1e:47:5b:45:69:c7:f1:aa:fb:4e:
+ 67:09:8f:34:e8:fd:3c:97:39:e3:8b:91:ba:33:c7:40:50:2b:
+ 5e:9d:fc:26:ed:2e:89:89:f8:f3:b8:71:dc:02:36:cd:99:e8:
+ 71:92:3d:32:fb:4b:dc:b4:a5:cb:79:d8:dd:24:7c:97:f1:64:
+ 65:cc:b1:6f:8d:eb:64:b6:bd:54:97:11:e9:0c:47:41:89:80:
+ 99:cb:35:2a
+-----BEGIN CERTIFICATE-----
+MIIDEzCCAfugAwIBAgIBCzANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+c2hpZnQtc2lnbmVyQDE0ODY0OTExNTgwHhcNMTcwMjA3MTgxOTM0WhcNMTkwMjA3
+MTgxOTM1WjA9MRUwEwYDVQQKEwxzeXN0ZW06bm9kZXMxJDAiBgNVBAMTG3N5c3Rl
+bTpub2RlOm0wMS5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOcVdDaSmeXuSp+7VCHUjEDeTP3j9aH0nreBj3079sEzethlLoQmwAqf
+CZp23qXGYm0R89+CC55buaH1FN/ltQ8QDGUzi4tdow9Af/0OcD0EINO2ukmyG5/9
+N+X905mo+y923wppvrchAA6AcxxeDyA63zouGS4exI98iuZlcdS48zbsGERkRPGg
+hoGCo7HoiKtUNL5X8MYibnFYnA4EUngdHZsRKuKte4t8GY4PYq4cxIOYXsJsNmT5
+mkFy4ThGFfR9IGg/VfyeWIkRe2VWyaUgzL0gHytAhlRJ9l54ynx96YEWrjCtp/kh
+d3KeVj0IUcMzvoXX5hipYUPkoezcxI8CAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgWg
+MBMGA1UdJQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQEL
+BQADggEBAM1jexLtuOOTsOPfEal/ICzfP9aX3m0R/yGPjwQv43jOc81NcL5d+CeD
+MX36tKAxFIe+wvXo0kUQOzTK3D7ol4x2YTtB4uDzNE5tVh5dWi2LrKYSqZDIrhKO
+MOmJRWR3AFEopaaGQpxsD/FSfZ5Mg0OMMBPHABxMrsiserHO1nh4ax3+SI0i7Jen
+gVsB4B/Xxg9Lw9JDX3/XMcI+fyLVw5ctO62BaluljpT+HkdbRWnH8ar7TmcJjzTo
+/TyXOeOLkbozx0BQK16d/CbtLomJ+PO4cdwCNs2Z6HGSPTL7S9y0pct52N0kfJfx
+ZGXMsW+N62S2vVSXEekMR0GJgJnLNSo=
+-----END CERTIFICATE-----
diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
new file mode 100644
index 000000000..2e245191f
--- /dev/null
+++ b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+'''
+ Unit tests for the FakeOpenSSL classes
+'''
+
+import os
+import sys
+import unittest
+import pytest
+
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split(os.path.sep)[:-1]), 'library')
+sys.path.insert(0, module_path)
+openshift_cert_expiry = pytest.importorskip("openshift_cert_expiry")
+
+
+@pytest.mark.skip('Skipping all tests because of unresolved import errors')
+class TestFakeOpenSSLClasses(unittest.TestCase):
+ '''
+ Test class for FakeOpenSSL classes
+ '''
+
+ def setUp(self):
+ ''' setup method for other tests '''
+ with open('test/system-node-m01.example.com.crt.txt', 'r') as fp:
+ self.cert_string = fp.read()
+
+ self.fake_cert = openshift_cert_expiry.FakeOpenSSLCertificate(self.cert_string)
+
+ with open('test/master.server.crt.txt', 'r') as fp:
+ self.cert_san_string = fp.read()
+
+ self.fake_san_cert = openshift_cert_expiry.FakeOpenSSLCertificate(self.cert_san_string)
+
+ def test_FakeOpenSSLCertificate_get_serial_number(self):
+ """We can read the serial number from the cert"""
+ self.assertEqual(11, self.fake_cert.get_serial_number())
+
+ def test_FakeOpenSSLCertificate_get_notAfter(self):
+ """We can read the cert expiry date"""
+ expiry = self.fake_cert.get_notAfter()
+ self.assertEqual('20190207181935Z', expiry)
+
+ def test_FakeOpenSSLCertificate_get_sans(self):
+ """We can read Subject Alt Names from a cert"""
+ ext = self.fake_san_cert.get_extension(0)
+
+ if ext.get_short_name() == 'subjectAltName':
+ sans = str(ext)
+
+ self.assertEqual('DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.122.241, IP Address:172.30.0.1, IP Address:192.168.122.241', sans)
+
+ def test_FakeOpenSSLCertificate_get_sans_no_sans(self):
+ """We can tell when there are no Subject Alt Names in a cert"""
+ with self.assertRaises(IndexError):
+ self.fake_cert.get_extension(0)
+
+ def test_FakeOpenSSLCertificate_get_subject(self):
+ """We can read the Subject from a cert"""
+ # Subject: O=system:nodes, CN=system:node:m01.example.com
+ subject = self.fake_cert.get_subject()
+ subjects = []
+ for name, value in subject.get_components():
+ subjects.append('{}={}'.format(name, value))
+
+ self.assertEqual('O=system:nodes, CN=system:node:m01.example.com', ', '.join(subjects))
+
+ def test_FakeOpenSSLCertificate_get_subject_san_cert(self):
+ """We can read the Subject from a cert with sans"""
+ # Subject: O=system:nodes, CN=system:node:m01.example.com
+ subject = self.fake_san_cert.get_subject()
+ subjects = []
+ for name, value in subject.get_components():
+ subjects.append('{}={}'.format(name, value))
+
+ self.assertEqual('CN=172.30.0.1', ', '.join(subjects))
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index a501ad938..e3cc3a9b4 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,7 +5,7 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.3.5
+XPAAS_VERSION=ose-v1.3.6
ORIGIN_VERSION=${1:-v1.5}
RHAMP_TAG=1.0.0.GA
RHAMP_TEMPLATE=https://raw.githubusercontent.com/3scale/rhamp-openshift-templates/${RHAMP_TAG}/apicast-gateway/apicast-gateway-template.yml
@@ -15,16 +15,21 @@ TEMP=`mktemp -d`
pushd $TEMP
wget https://github.com/openshift/origin/archive/master.zip -O origin-master.zip
+wget https://github.com/jboss-fuse/application-templates/archive/GA.zip -O fis-GA.zip
wget https://github.com/jboss-openshift/application-templates/archive/${XPAAS_VERSION}.zip -O application-templates-master.zip
unzip origin-master.zip
unzip application-templates-master.zip
-cp origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/
-cp origin-master/examples/quickstarts/* ${EXAMPLES_BASE}/quickstart-templates/
-cp origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quickstart-templates/
-cp origin-master/examples/image-streams/* ${EXAMPLES_BASE}/image-streams/
+unzip fis-GA.zip
+mv origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/
+mv origin-master/examples/quickstarts/* ${EXAMPLES_BASE}/quickstart-templates/
+mv origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quickstart-templates/
+mv origin-master/examples/image-streams/* ${EXAMPLES_BASE}/image-streams/
mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/
+# fis content from jboss-fuse/application-templates-GA would collide with jboss-openshift/application-templates
+# as soon as they use the same branch/tag names
+mv application-templates-GA/fis-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
+mv application-templates-GA/quickstarts/* ${EXAMPLES_BASE}/xpaas-templates/
find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' ! -wholename '*demo*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
-wget https://raw.githubusercontent.com/jboss-fuse/application-templates/GA/fis-image-streams.json -O ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json
wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/metrics-deployer.yaml
wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/metrics-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json
index ed0e94bed..9d99973be 100644
--- a/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-streams/fis-image-streams.json
@@ -20,12 +20,22 @@
{
"name": "1.0",
"annotations": {
- "description": "JBoss Fuse Integration Services 6.2.1 Java S2I images.",
+ "description": "JBoss Fuse Integration Services 1.0 Java S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,jboss-fuse,java,xpaas",
"supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
"version": "1.0"
}
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 2.0 Java S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,xpaas",
+ "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
+ "version": "2.0"
+ }
}
]
}
@@ -42,12 +52,22 @@
{
"name": "1.0",
"annotations": {
- "description": "JBoss Fuse Integration Services 6.2.1 Karaf S2I images.",
+ "description": "JBoss Fuse Integration Services 1.0 Karaf S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,jboss-fuse,java,karaf,xpaas",
"supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
"version": "1.0"
}
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 2.0 Karaf S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,karaf,xpaas",
+ "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
+ "version": "2.0"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-amq-template.json
new file mode 100644
index 000000000..cd0bec3c1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-amq-template.json
@@ -0,0 +1,362 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel route using ActiveMQ in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-amq.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-amq-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-amq",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_USERNAME",
+ "value": "${ACTIVEMQ_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_PASSWORD",
+ "value": "${ACTIVEMQ_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-log-template.json
new file mode 100644
index 000000000..2ecce08a9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-log-template.json
@@ -0,0 +1,336 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "A simple Camel route in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-log"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-log"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-log",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-log.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-log-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-log",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-rest-sql-template.json
new file mode 100644
index 000000000..d80939efb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-camel-rest-sql-template.json
@@ -0,0 +1,421 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel example using Rest DSL with SQL Database in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-rest-sql.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-rest-sql-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "required": true,
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-cxf-rest-template.json
new file mode 100644
index 000000000..f99099868
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/karaf2-cxf-rest-template.json
@@ -0,0 +1,385 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "REST example using CXF in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-cxf-rest"
+ },
+ "labels": {
+ "template": "s2i-karaf2-cxf-rest"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-cxf-rest.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-cxf-rest-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "java",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9412,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
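
A consistency check worth running on files like these: every ${NAME} referenced under "objects" should be declared under "parameters". The karaf2-camel-rest-sql and karaf2-cxf-rest templates above reference ${BUILD_SECRET} in their GitHub and Generic webhook triggers without declaring it (the spring-boot templates further down do declare it), so a sketch like the following, assuming Python and local copies of the files, would flag them:

#!/usr/bin/env python
"""Report ${NAME} placeholders used in a template's objects but missing from
its parameters list."""
import json
import re
import sys

PLACEHOLDER = re.compile(r"\$\{([A-Za-z0-9_]+)\}")

def undeclared_params(path):
    with open(path) as f:
        template = json.load(f)
    declared = set(p["name"] for p in template.get("parameters", []))
    referenced = set(PLACEHOLDER.findall(json.dumps(template.get("objects", []))))
    return sorted(referenced - declared)

if __name__ == "__main__":
    for path in sys.argv[1:]:
        missing = undeclared_params(path)
        if missing:
            print("%s: undeclared parameters: %s" % (path, ", ".join(missing)))
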
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/openjdk18-web-basic-s2i.json
new file mode 100644
index 000000000..143e16756
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/openjdk18-web-basic-s2i.json
@@ -0,0 +1,267 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for Java applications built using S2I.",
+ "tags": "java,xpaas",
+ "version": "1.0.0"
+ },
+ "name": "openjdk18-web-basic-s2i"
+ },
+ "labels": {
+ "template": "openjdk18-web-basic-s2i",
+ "xpaas": "1.0.0"
+ },
+ "message": "A new java application has been created in your project.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "openjdk-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "undertow-servlet",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The application's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-openjdk18-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "env": [
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
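
The webhook secret parameters in this template rely on "generate": "expression": when no value is supplied, OpenShift generates a random string matching the "from" pattern. A rough sketch of the idea follows, handling only the single character-class-with-count form used in these files; the real generator supports a richer subset of regular expressions, so treat this as illustrative.

#!/usr/bin/env python
"""Expand a `from` pattern of the form [a-zA-Z0-9]{N} into a random string,
mimicking what `generate: expression` does for the webhook secrets above."""
import random
import re
import string

def generate_from_expression(pattern):
    match = re.match(r"\[a-zA-Z0-9\]\{(\d+)\}$", pattern)
    if not match:
        raise ValueError("pattern not supported by this sketch: %r" % pattern)
    alphabet = string.ascii_letters + string.digits
    rng = random.SystemRandom()  # cryptographically sourced randomness
    return "".join(rng.choice(alphabet) for _ in range(int(match.group(1))))

print(generate_from_expression("[a-zA-Z0-9]{8}"))   # webhook secret form
print(generate_from_expression("[a-zA-Z0-9]{40}"))  # BUILD_SECRET form
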
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-amq-template.json
new file mode 100644
index 000000000..8b3cd6ed0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-amq-template.json
@@ -0,0 +1,331 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel and ActiveMQ QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to an ActiveMQ broker and use JMS messaging between two Camel routes using OpenShift. In this example we will use two containers, one container to run as a ActiveMQ broker, and another as a client to the broker, where the Camel routes are running. This quickstart requires the ActiveMQ broker has been deployed and running first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-amq.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-amq-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "value": "${ACTIVEMQ_BROKER_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "value": "${ACTIVEMQ_BROKER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
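
The readiness and liveness probes above target the Spring Boot management endpoint (/health on port 8081) rather than a port declared under "ports"; that works because the kubelet probes the pod IP directly and does not consult containerPort declarations. A sketch of the equivalent check follows, with a hypothetical pod IP; Kubernetes counts any status in the 200-399 range as success.

#!/usr/bin/env python
"""Approximation of the kubelet's httpGet probe: GET the path and treat
2xx/3xx responses as healthy."""
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen         # Python 2

def http_probe(host, port=8081, path="/health", timeout=1.0):
    try:
        status = urlopen("http://%s:%d%s" % (host, port, path),
                         timeout=timeout).getcode()
    except Exception:
        return False
    return 200 <= status < 400

print(http_probe("10.1.2.3"))  # hypothetical pod IP
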
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-config-template.json
new file mode 100644
index 000000000..bc5bbad22
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-config-template.json
@@ -0,0 +1,327 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot and Camel using ConfigMaps and Secrets. This quickstart demonstrates how to configure a Spring-Boot application using Openshift ConfigMaps and Secrets.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-config"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-config"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-config",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-config.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-config-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_ACCOUNT_NAME",
+ "displayName": "Service Account",
+ "value": "qs-camel-config",
+ "required": true,
+ "description": "The Service Account that will be used to run the container. It must be already present in Openshift and have the view role."
+ },
+ {
+ "name": "SECRET_NAME",
+ "displayName": "Secret Name",
+ "value": "camel-config",
+ "required": true,
+ "description": "The name of the Openshift Secret that will be used to configure the application. It must be already present in Openshift."
+ },
+ {
+ "name": "CONFIGMAP_NAME",
+ "displayName": "ConfigMap Name",
+ "value": "camel-config",
+ "required": true,
+ "description": "The name of the Openshift ConfigMap that will be used to configure the application. It must be already present in Openshift."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "volumes": [
+ {
+ "name": "camel-config",
+ "secret": {
+ "secretName": "${SECRET_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_SECRETS_NAME",
+ "value": "${SECRET_NAME}"
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_CONFIG_NAME",
+ "value": "${CONFIGMAP_NAME}"
+ } ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "camel-config",
+ "readOnly": true,
+ "mountPath": "/etc/secrets/camel-config"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
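
On the application side, the camel-config Secret mounted above appears as a directory: Kubernetes materializes each key of the Secret as a file under the mount path. A sketch of reading it back from inside the pod follows; the mount path comes from the template, while the key names depend on whatever the Secret contains.

#!/usr/bin/env python
"""Read a Kubernetes Secret volume back as a dict of key -> file contents."""
import os

def read_mounted_secret(mount_path="/etc/secrets/camel-config"):
    values = {}
    for name in os.listdir(mount_path):
        if name.startswith("."):        # skip the ..data bookkeeping entries
            continue
        full = os.path.join(mount_path, name)
        if os.path.isfile(full):        # each Secret key is one file
            with open(full) as f:
                values[name] = f.read()
    return values

if os.path.isdir("/etc/secrets/camel-config"):  # only works inside the pod
    print(read_mounted_secret())
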
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-drools-template.json
new file mode 100644
index 000000000..e54fa0d59
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-drools-template.json
@@ -0,0 +1,334 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot, Camel and JBoss BRMS QuickStart. This example demonstrates how you can use Apache Camel and JBoss BRMS with Spring Boot on OpenShift. DRL files contain simple rules which are used to create knowledge session via Spring configuration file. Camel routes, defined via Spring as well, are then used to e.g. pass (insert) the Body of the message as a POJO to Drools engine for execution.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-drools"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-drools"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-drools",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-drools.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-drools-1.0.0.redhat-000054",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "KIESERVER_SERVICE",
+ "displayName": "Decision Server Name",
+ "required": true,
+ "value": "kie-app",
+ "description": "Set this to the name of the Decision Server. You may need to create an instance before."
+ },
+ {
+ "name": "KIESERVER_USERNAME",
+ "displayName": "Decision Server Username",
+ "required": true,
+ "value": "kieserver",
+ "description": "The username used to authenticate with the Decision Server."
+ },
+ {
+ "name": "KIESERVER_PASSWORD",
+ "displayName": "Decision Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the Decision Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000054",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "KIESERVER_SERVICE",
+ "value": "${KIESERVER_SERVICE}"
+ }, {
+ "name": "KIESERVER_USERNAME",
+ "value": "${KIESERVER_USERNAME}"
+ }, {
+ "name": "KIESERVER_PASSWORD",
+ "value": "${KIESERVER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-infinispan-template.json
new file mode 100644
index 000000000..20ba97dac
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-infinispan-template.json
@@ -0,0 +1,315 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel and JBoss Data Grid QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to a JBoss Data Grid (or Infinispan) server using the Hot Rod protocol. It requires that the data grid server (or cluster) has been deployed first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-infinispan"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-infinispan"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-infinispan",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-infinispan.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-infinispan-1.0.0.redhat-000024",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "INFINISPAN_SERVICE",
+ "displayName": "JBoss Data Grid Service (Hot Rod)",
+ "required": true,
+ "value": "datagrid-app-hotrod",
+ "description": "Set this to the name of the Hot Rod service of the JBoss Data Grid. You may need to create the data grid first."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000024",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "INFINISPAN_SERVICE",
+ "value": "${INFINISPAN_SERVICE}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
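
One invariant every DeploymentConfig in these templates maintains is that spec.selector is a subset of the pod template's labels; if it were not, the deployment could not own the pods it creates. A quick check, sketched against a hypothetical local copy of one of these files:

#!/usr/bin/env python
"""Verify that each DeploymentConfig's selector matches its pod template
labels (the selector must be a subset of the labels)."""
import json

def mismatched_selectors(path):
    with open(path) as f:
        template = json.load(f)
    bad = []
    for obj in template.get("objects", []):
        if obj.get("kind") != "DeploymentConfig":
            continue
        selector = obj["spec"].get("selector", {})
        labels = obj["spec"]["template"]["metadata"].get("labels", {})
        if not set(selector.items()) <= set(labels.items()):
            bad.append(obj["metadata"]["name"])
    return bad

print(mismatched_selectors("spring-boot-camel-infinispan-template.json") or "ok")
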
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-rest-sql-template.json
new file mode 100644
index 000000000..555647fab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-rest-sql-template.json
@@ -0,0 +1,403 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel REST DSL and MySQL QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a MySQL database and expose a REST API with Camel on OpenShift. In this example we will use two containers, one container to run as a MySQL server, and another as a client to the database, where the Camel routes are running. This quickstart requires the MySQL server to be deployed and started first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-rest-sql.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-rest-sql-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "camel-rest-sql",
+ "description": "Exposed service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8080,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
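
A note on the BUILD_SECRET parameter shared by all of these quickstart
templates: "generate": "expression" tells the template processor to mint a
random value matching the "from" pattern ([a-zA-Z0-9]{40}) each time the
template is instantiated, so every deployment gets its own webhook secret.
Below is a minimal Python sketch of that generator, supporting only a single
character class with a repetition count (the real implementation lives in
OpenShift's Go template processor):

    import random
    import re

    def generate_from_expression(pattern):
        # Expand a tiny subset of OpenShift's 'expression' generator:
        # one character class followed by {n}, e.g. '[a-zA-Z0-9]{40}'.
        match = re.fullmatch(r'\[([a-zA-Z0-9\-]+)\]\{(\d+)\}', pattern)
        if not match:
            raise ValueError('unsupported expression: %s' % pattern)
        charclass, count = match.group(1), int(match.group(2))
        chars = []
        i = 0
        while i < len(charclass):
            # Expand ranges such as 'a-z' into their member characters.
            if i + 2 < len(charclass) and charclass[i + 1] == '-':
                lo, hi = charclass[i], charclass[i + 2]
                chars.extend(chr(c) for c in range(ord(lo), ord(hi) + 1))
                i += 3
            else:
                chars.append(charclass[i])
                i += 1
        rng = random.SystemRandom()
        return ''.join(rng.choice(chars) for _ in range(count))

    print(generate_from_expression('[a-zA-Z0-9]{40}'))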
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-teiid-template.json
new file mode 100644
index 000000000..cf9a4e903
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-teiid-template.json
@@ -0,0 +1,343 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot, Camel and JBoss Data Virtualization QuickStart. This example demonstrates how to connect Apache Camel to a remote JBoss Data Virtualization (or Teiid) Server using the JDBC protocol.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-teiid"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-teiid"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-teiid",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-teiid.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-teiid-1.0.0.redhat-000053",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "TEIID_SERVICE_NAME",
+ "displayName": "JDV Server Name",
+ "required": true,
+ "value": "datavirt-app",
+ "description": "Set this to the name of the JDV Server. You may need to create an instance before."
+ },
+ {
+ "name": "TEIID_PORT_NAME",
+ "displayName": "JDV Port Name",
+ "value": "jdbc",
+ "description": "Set this to the name of the JDV port to use. Set this value if the JDV service contains multiple named ports."
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "displayName": "JDV Server Username",
+ "required": true,
+ "description": "The username used to authenticate with the JDV Server."
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "displayName": "JDV Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the JDV Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000053",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [
+ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "TEIID_SERVICE_NAME",
+ "value": "${TEIID_SERVICE_NAME}"
+ }, {
+ "name": "TEIID_PORT_NAME",
+ "value": "${TEIID_PORT_NAME}"
+ }, {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ }, {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ }],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
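
The Teiid quickstart gets everything it needs to reach the JDV server from
the four TEIID_* variables injected above; Kubernetes itself supplies the
service address through the usual <NAME>_SERVICE_HOST/_SERVICE_PORT
variables. A hypothetical sketch of how an application could resolve the
connection endpoint from that environment (the helper name is ours, not
part of the quickstart):

    import os

    def teiid_endpoint():
        # TEIID_SERVICE_NAME names the JDV service, e.g. 'datavirt-app';
        # Kubernetes exposes it as DATAVIRT_APP_SERVICE_HOST and, when the
        # service has named ports, DATAVIRT_APP_SERVICE_PORT_JDBC.
        prefix = os.environ['TEIID_SERVICE_NAME'].upper().replace('-', '_')
        host = os.environ[prefix + '_SERVICE_HOST']
        port_var = prefix + '_SERVICE_PORT'
        port_name = os.environ.get('TEIID_PORT_NAME')
        if port_name:
            port_var += '_' + port_name.upper().replace('-', '_')
        return host, int(os.environ[port_var])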
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-template.json
new file mode 100644
index 000000000..c78a96f7c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and Camel QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a Camel route that triggeres a message every 5th second, and routes the message to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
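
Every container in these templates points both probes at the Spring Boot
Actuator endpoint /health on the management port 8081: readiness starts
after 10 seconds (gating the pod's membership in the Service), while
liveness waits 180 seconds before its first check so a slow JVM start does
not trigger a restart loop. A rough sketch of the httpGet check the
kubelet performs, where any status below 400 counts as success:

    import urllib.request

    def probe_health(host, port=8081, path='/health', timeout=1.0):
        # Mimic a Kubernetes httpGet probe: 2xx/3xx means healthy.
        url = 'http://%s:%d%s' % (host, port, path)
        try:
            with urllib.request.urlopen(url, timeout=timeout) as resp:
                return 200 <= resp.status < 400
        except OSError:
            return False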
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-xml-template.json
new file mode 100644
index 000000000..620425902
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-camel-xml-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and Camel Xml QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a Camel route (in Spring xml) that triggeres a message every 5th second, and routes the message to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-xml"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-xml"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-xml",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-xml.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-xml-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
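
The MAVEN_ARGS / MAVEN_ARGS_APPEND / ARTIFACT_DIR trio drives the FIS S2I
build: the builder runs Maven with MAVEN_ARGS, appends MAVEN_ARGS_APPEND
(useful for multi-module builds), and then collects the built artifact
from ARTIFACT_DIR. Roughly, and only as a sketch of the argument handling
rather than the builder's actual script:

    def maven_invocation(maven_args, maven_args_append=''):
        # The default args skip tests and the fabric8 plugin, and run
        # Maven in batch mode (-B) with full error output (-e).
        args = maven_args.split()
        if maven_args_append:
            args += maven_args_append.split()
        return ['mvn'] + args

    print(' '.join(maven_invocation(
        'package -DskipTests -Dfabric8.skip -e -B',
        '-pl my-module -am')))  # hypothetical multi-module flags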
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxrs-template.json
new file mode 100644
index 000000000..15cfc93fd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxrs-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and CXF JAXRS QuickStart. This example demonstrates how you can use Apache CXF JAXRS with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a CXF JAXRS endpoint with Swagger enabled.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxrs",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxrs.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxrs-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
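
CPU_REQUEST and CPU_LIMIT are ordinary Kubernetes CPU quantities: the
default request of "0.2" is a fifth of a core, equivalent to "200m". A
small sketch of the millicore arithmetic the scheduler effectively uses
when comparing such values:

    def cpu_to_millicores(quantity):
        # '200m' is already millicores; '0.2' or '1.0' are whole cores.
        if quantity.endswith('m'):
            return int(quantity[:-1])
        return int(round(float(quantity) * 1000))

    assert cpu_to_millicores('0.2') == 200
    assert cpu_to_millicores('200m') == 200
    assert cpu_to_millicores('1.0') == 1000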
diff --git a/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxws-template.json
new file mode 100644
index 000000000..c70ee7726
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/xpaas-templates/spring-boot-cxf-jaxws-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and CXF JAXWS QuickStart. This example demonstrates how you can use Apache CXF JAXWS with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a CXF JAXWS endpoint.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxws"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxws"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxws",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxws.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxws-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
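
The GitHub and Generic triggers in each BuildConfig are guarded only by
BUILD_SECRET, which becomes part of the webhook URL itself. A hedged
sketch of firing the generic hook, assuming the /oapi/v1 webhook path
layout OpenShift used in this era (no bearer token is needed; the secret
embedded in the URL is the credential):

    import urllib.request

    def trigger_generic_build(master_url, namespace, buildconfig, secret):
        # An empty POST starts a build if the secret matches the trigger.
        url = '%s/oapi/v1/namespaces/%s/buildconfigs/%s/webhooks/%s/generic' % (
            master_url, namespace, buildconfig, secret)
        req = urllib.request.Request(url, data=b'', method='POST')
        with urllib.request.urlopen(req) as resp:
            return resp.status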
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json
index cfbfc3e20..f347f1f9f 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "MariaDB (Ephemeral)",
"description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mariadb",
- "tags": "database,mariadb"
+ "tags": "database,mariadb",
+ "template.openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json
index e933eecf0..6ed744777 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "MariaDB (Persistent)",
"description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mariadb",
- "tags": "database,mariadb"
+ "tags": "database,mariadb",
+ "template.openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json
index c38d2680b..97a8abf6d 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "MongoDB (Ephemeral)",
"description": "MongoDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mongodb",
- "tags": "database,mongodb"
+ "tags": "database,mongodb",
+ "template.openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json
index e8853d8ff..0656219fb 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "MongoDB (Persistent)",
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
- "tags": "database,mongodb"
+ "tags": "database,mongodb",
+ "template.openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json
index f7bcfe2ed..d60b4647d 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "MySQL (Ephemeral)",
"description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mysql-database",
- "tags": "database,mysql"
+ "tags": "database,mysql",
+ "template.openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json
index 85c48da01..c2bfa40fd 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "MySQL (Persistent)",
"description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
- "tags": "database,mysql"
+ "tags": "database,mysql",
+ "template.openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json
index 64d5e2b32..7a16e742a 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "PostgreSQL (Ephemeral)",
"description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-postgresql",
- "tags": "database,postgresql"
+ "tags": "database,postgresql",
+ "template.openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
index 6c101f9d2..242212d6f 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "PostgreSQL (Persistent)",
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
- "tags": "database,postgresql"
+ "tags": "database,postgresql",
+ "template.openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json
index b97e1fd29..82a09a3ec 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "Redis (Ephemeral)",
"description": "Redis in-memory data structure store, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-redis",
- "tags": "database,redis"
+ "tags": "database,redis",
+ "template.openshift.io/long-description": "This template provides a standalone Redis server. The data is not stored on persistent storage, so any restart of the service will result in all data being lost.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json
index dc167da41..1d5f59188 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "Redis (Persistent)",
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-redis",
- "tags": "database,redis"
+ "tags": "database,redis",
+ "template.openshift.io/long-description": "This template provides a standalone Redis server. The data is stored on persistent storage.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
index 9b19b8bd0..eb94c3bb4 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
@@ -241,7 +241,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.6"
+ "name": "7.0"
}
},
{
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql-persistent.json
index 0ba57864e..eb3d296be 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql-persistent.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "CakePHP + MySQL (Persistent)",
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
"tags": "quickstart,php,cakephp",
- "iconClass": "icon-php"
+ "iconClass": "icon-php",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
@@ -129,7 +133,10 @@
"secret": "${GITHUB_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "./lib/Cake/Console/cake test app AllTests"
+ }
}
},
{
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json
index 9732e59e1..da2454d2e 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "CakePHP + MySQL (Ephemeral)",
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,php,cakephp",
- "iconClass": "icon-php"
+ "iconClass": "icon-php",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
@@ -129,7 +133,10 @@
"secret": "${GITHUB_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "./lib/Cake/Console/cake test app AllTests"
+ }
}
},
{
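The postCommit block added to both CakePHP build configurations runs the CakePHP test suite in a temporary container after each build commits the image; a non-zero exit fails the build. A hedged sketch of applying the same hook from automation, assuming the instantiated BuildConfig is named cakephp-mysql-example, oc is logged in, and your oc build supports `oc set build-hook`:

    import subprocess

    # Apply the same post-commit test hook via the CLI. The BuildConfig name
    # and an authenticated `oc` session are assumptions.
    subprocess.run(
        ["oc", "set", "build-hook", "bc/cakephp-mysql-example",
         "--post-commit", "--script=./lib/Cake/Console/cake test app AllTests"],
        check=True,
    )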
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql-persistent.json
index 074561550..81ae63416 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql-persistent.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Dancer + MySQL (Persistent)",
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"tags": "quickstart,perl,dancer",
- "iconClass": "icon-perl"
+ "iconClass": "icon-perl",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/dancer-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json
index 18100974b..7a285dba8 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Dancer + MySQL (Ephemeral)",
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,perl,dancer",
- "iconClass": "icon-perl"
+ "iconClass": "icon-perl",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/dancer-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql-persistent.json
index b39771bd8..9f982c286 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql-persistent.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Django + PostgreSQL (Persistent)",
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"tags": "quickstart,python,django",
- "iconClass": "icon-python"
+ "iconClass": "icon-python",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/django-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json
index 64b914e61..7bee85ddd 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Django + PostgreSQL (Ephemeral)",
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,python,django",
- "iconClass": "icon-python"
+ "iconClass": "icon-python",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/django-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json
index 62ccc5b7f..b0aef3cfc 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "Jenkins (Ephemeral)",
"description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"iconClass": "icon-jenkins",
- "tags": "instant-app,jenkins"
+ "tags": "instant-app,jenkins",
+ "template.openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login. The Jenkins configuration is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json
index 50c4ad566..a542de219 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "Jenkins (Persistent)",
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
- "tags": "instant-app,jenkins"
+ "tags": "instant-app,jenkins",
+ "template.openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb-persistent.json
index fecb84662..6ee999cb1 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb-persistent.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Node.js + MongoDB (Persistent)",
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"tags": "quickstart,nodejs",
- "iconClass": "icon-nodejs"
+ "iconClass": "icon-nodejs",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json
index 6a55f0251..5c177a7e0 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Node.js + MongoDB (Ephemeral)",
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,nodejs",
- "iconClass": "icon-nodejs"
+ "iconClass": "icon-nodejs",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql-persistent.json
index 6c0a484b5..b400cfdb3 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql-persistent.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Rails + PostgreSQL (Persistent)",
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"tags": "quickstart,ruby,rails",
- "iconClass": "icon-ruby"
+ "iconClass": "icon-ruby",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/rails-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
@@ -165,7 +169,7 @@
"imageChangeParams": {
"automatic": true,
"containerNames": [
- "rails-pgsql-persistent"
+ "${NAME}"
],
"from": {
"kind": "ImageStreamTag",
@@ -191,7 +195,7 @@
"spec": {
"containers": [
{
- "name": "rails-pgsql-persistent",
+ "name": "${NAME}",
"image": " ",
"ports": [
{
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json
index 043554c79..fa67412ff 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Rails + PostgreSQL (Ephemeral)",
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,ruby,rails",
- "iconClass": "icon-ruby"
+ "iconClass": "icon-ruby",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/rails-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
@@ -165,7 +169,7 @@
"imageChangeParams": {
"automatic": true,
"containerNames": [
- "rails-postgresql-example"
+ "${NAME}"
],
"from": {
"kind": "ImageStreamTag",
@@ -191,7 +195,7 @@
"spec": {
"containers": [
{
- "name": "rails-postgresql-example",
+ "name": "${NAME}",
"image": " ",
"ports": [
{
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v1.4/xpaas-streams/fis-image-streams.json
index ed0e94bed..9d99973be 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-streams/fis-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-streams/fis-image-streams.json
@@ -20,12 +20,22 @@
{
"name": "1.0",
"annotations": {
- "description": "JBoss Fuse Integration Services 6.2.1 Java S2I images.",
+ "description": "JBoss Fuse Integration Services 1.0 Java S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,jboss-fuse,java,xpaas",
"supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
"version": "1.0"
}
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 2.0 Java S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,xpaas",
+ "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
+ "version": "2.0"
+ }
}
]
}
@@ -42,12 +52,22 @@
{
"name": "1.0",
"annotations": {
- "description": "JBoss Fuse Integration Services 6.2.1 Karaf S2I images.",
+ "description": "JBoss Fuse Integration Services 1.0 Karaf S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,jboss-fuse,java,karaf,xpaas",
"supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
"version": "1.0"
}
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 2.0 Karaf S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,karaf,xpaas",
+ "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
+ "version": "2.0"
+ }
}
]
}
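The new 2.0 tags published here are what the karaf2 quickstart templates added later in this diff consume via fis-karaf-openshift:${BUILDER_VERSION}. A small sketch that lists the builder tags, assuming the file is saved locally as fis-image-streams.json and has the usual top-level "items" list (both assumptions):

    import json

    # Enumerate builder image tags and their descriptions.
    with open("fis-image-streams.json") as f:
        streams = json.load(f)

    for stream in streams["items"]:
        for tag in stream["spec"].get("tags", []):
            print(stream["metadata"]["name"], tag["name"], "-",
                  tag["annotations"]["description"])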
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json
index a7cb12867..049f3f884 100644
--- a/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-streams/jboss-image-streams.json
@@ -367,6 +367,31 @@
}
]
}
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redhat-openjdk18-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat OpenJDK 8",
+ "description": "Build and run Java applications using Maven and OpenJDK 8.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,java,xpaas,openjdk",
+ "supports":"java:8,xpaas:1.0",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "sampleContextDir": "undertow-servlet",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
}
]
}
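The sampleRepo and sampleContextDir annotations on the new redhat-openjdk18-openshift stream point at a buildable example, so the 1.0 tag can be smoke-tested directly. A sketch, assuming an authenticated oc session and the stream imported where oc new-app can resolve it:

    import subprocess

    # Build the annotated sample against the new tag; repo and context dir
    # come from the annotations above, the `oc` session is an assumption.
    subprocess.run(
        ["oc", "new-app",
         "redhat-openjdk18-openshift:1.0~https://github.com/jboss-openshift/openshift-quickstarts",
         "--context-dir=undertow-servlet"],
        check=True,
    )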
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-amq-template.json
new file mode 100644
index 000000000..cd0bec3c1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-amq-template.json
@@ -0,0 +1,362 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel route using ActiveMQ in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-amq.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-amq-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-amq",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_USERNAME",
+ "value": "${ACTIVEMQ_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_PASSWORD",
+ "value": "${ACTIVEMQ_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
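All four karaf2 templates share one parameter pattern: plain parameters carry defaults in "value", BUILD_SECRET is generated from the expression [a-zA-Z0-9]{40}, and ${NAME} references inside "objects" are replaced at instantiation while unknown references pass through untouched. A rough client-side illustration of that mechanism (not the server implementation; the local filename is an assumption):

    import json
    import re
    import secrets
    import string

    def instantiate(path, overrides=None):
        # Toy template instantiation: resolve parameter values, then replace
        # ${NAME} references inside the serialized object list.
        with open(path) as f:
            template = json.load(f)
        values = dict(overrides or {})
        for param in template.get("parameters", []):
            if param["name"] in values:
                continue
            if param.get("generate") == "expression":
                # Toy stand-in for the "[a-zA-Z0-9]{40}" expression used above.
                alphabet = string.ascii_letters + string.digits
                values[param["name"]] = "".join(
                    secrets.choice(alphabet) for _ in range(40))
            else:
                values[param["name"]] = param.get("value", "")
        text = json.dumps(template["objects"])
        text = re.sub(r"\$\{(\w+)\}",
                      lambda m: values.get(m.group(1), m.group(0)), text)
        return json.loads(text)

    objects = instantiate("karaf2-camel-amq-template.json",
                          {"ACTIVEMQ_SERVICE_NAME": "broker-amq-tcp"})
    print([obj["kind"] for obj in objects])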
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-log-template.json
new file mode 100644
index 000000000..2ecce08a9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-log-template.json
@@ -0,0 +1,336 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "A simple Camel route in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-log"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-log"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-log",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-log.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-log-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-log",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
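Both Karaf templates wire the container's readiness and liveness probes to HTTP endpoints on the Karaf port 8181 (/readiness-check after a 10s delay, /health-check after 180s). A sketch of polling them by hand, assuming the pod's 8181 port has been forwarded to localhost:

    import urllib.request

    # Poll the probe endpoints defined above. localhost:8181 assumes something
    # like `oc port-forward <pod> 8181:8181` is running.
    for path in ("/readiness-check", "/health-check"):
        url = "http://localhost:8181" + path
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                print(path, "->", resp.status)
        except Exception as exc:
            print(path, "-> failed:", exc)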
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-rest-sql-template.json
new file mode 100644
index 000000000..d80939efb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-camel-rest-sql-template.json
@@ -0,0 +1,428 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel example using Rest DSL with SQL Database in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-rest-sql.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-rest-sql-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "required": true,
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
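The GitHub and Generic triggers in these BuildConfigs gate webhook delivery on BUILD_SECRET. A sketch of composing the webhook URL a Git host would call, using the OpenShift 3.x webhook path layout; the master host, namespace, and BuildConfig name below are placeholders:

    # Compose the GitHub webhook URL for a BuildConfig. All values below are
    # assumptions; the secret is whatever BUILD_SECRET was generated.
    master = "https://master.example.com:8443"
    namespace = "myproject"
    name = "s2i-karaf2-camel-rest-sql"
    secret = "<generated BUILD_SECRET>"

    url = "{}/oapi/v1/namespaces/{}/buildconfigs/{}/webhooks/{}/github".format(
        master, namespace, name, secret)
    print(url)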
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-cxf-rest-template.json
new file mode 100644
index 000000000..f99099868
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/karaf2-cxf-rest-template.json
@@ -0,0 +1,392 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "REST example using CXF in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-cxf-rest"
+ },
+ "labels": {
+ "template": "s2i-karaf2-cxf-rest"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-cxf-rest.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-cxf-rest-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "java",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9412,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/openjdk18-web-basic-s2i.json
new file mode 100644
index 000000000..143e16756
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/openjdk18-web-basic-s2i.json
@@ -0,0 +1,267 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for Java applications built using S2I.",
+ "tags": "java,xpaas",
+ "version": "1.0.0"
+ },
+ "name": "openjdk18-web-basic-s2i"
+ },
+ "labels": {
+ "template": "openjdk18-web-basic-s2i",
+ "xpaas": "1.0.0"
+ },
+    "message": "A new Java application has been created in your project.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "openjdk-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "undertow-servlet",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+            "displayName": "GitHub Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The application's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-openjdk18-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "env": [
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
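Note on usage: templates like the one above are consumed with oc process, which substitutes the parameters and hands the resulting object list to the API server. A minimal Ansible-flavoured sketch of instantiating it, assuming the file has been saved locally and using illustrative names (myapp, myproject); parameter flag spellings (-p vs the older -v) vary across oc releases:

- name: Process and create the openjdk18-web-basic-s2i objects (sketch)
  shell: >
    oc process -f openjdk18-web-basic-s2i.json
    -p APPLICATION_NAME=myapp
    | oc create -n myproject -f -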
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-amq-template.json
new file mode 100644
index 000000000..8b3cd6ed0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-amq-template.json
@@ -0,0 +1,331 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+            "description": "Spring Boot, Camel and ActiveMQ QuickStart. This quickstart demonstrates how to connect a Spring Boot application to an ActiveMQ broker and use JMS messaging between two Camel routes on OpenShift. In this example we use two containers, one running as an ActiveMQ broker and another as a client to the broker, where the Camel routes run. This quickstart requires that the ActiveMQ broker be deployed and running first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-amq.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-amq-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+            "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+            "description": "The maximum amount of CPU the container may use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "value": "${ACTIVEMQ_BROKER_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "value": "${ACTIVEMQ_BROKER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
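Note on the build triggers: the GitHub and Generic triggers in the BuildConfig above share the generated BUILD_SECRET, so a build can be started remotely by POSTing to the matching webhook endpoint. A hedged sketch using Ansible's uri module, assuming the OpenShift 3.x /oapi URL shape; the host, project, and secret are placeholders, not values from this diff:

- name: Trigger a build of s2i-spring-boot-camel-amq via the generic webhook (sketch)
  uri:
    # host, namespace and secret below are illustrative placeholders
    url: "https://openshift.example.com:8443/oapi/v1/namespaces/myproject/buildconfigs/s2i-spring-boot-camel-amq/webhooks/REPLACE_WITH_BUILD_SECRET/generic"
    method: POST
    validate_certs: no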
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-config-template.json
new file mode 100644
index 000000000..bc5bbad22
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-config-template.json
@@ -0,0 +1,327 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+            "description": "Spring Boot and Camel using ConfigMaps and Secrets. This quickstart demonstrates how to configure a Spring Boot application using OpenShift ConfigMaps and Secrets.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-config"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-config"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-config",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-config.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-config-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_ACCOUNT_NAME",
+ "displayName": "Service Account",
+ "value": "qs-camel-config",
+ "required": true,
+            "description": "The Service Account that will be used to run the container. It must already exist in OpenShift and have the view role."
+ },
+ {
+ "name": "SECRET_NAME",
+ "displayName": "Secret Name",
+ "value": "camel-config",
+ "required": true,
+            "description": "The name of the OpenShift Secret that will be used to configure the application. It must already exist in OpenShift."
+ },
+ {
+ "name": "CONFIGMAP_NAME",
+ "displayName": "ConfigMap Name",
+ "value": "camel-config",
+ "required": true,
+            "description": "The name of the OpenShift ConfigMap that will be used to configure the application. It must already exist in OpenShift."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "volumes": [
+ {
+ "name": "camel-config",
+ "secret": {
+ "secretName": "${SECRET_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_SECRETS_NAME",
+ "value": "${SECRET_NAME}"
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_CONFIG_NAME",
+ "value": "${CONFIGMAP_NAME}"
+ } ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "camel-config",
+ "readOnly": true,
+ "mountPath": "/etc/secrets/camel-config"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
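Note on prerequisites: as its parameter descriptions state, s2i-spring-boot-camel-config expects the service account, Secret, and ConfigMap to exist before the template is processed. A sketch of creating them with the template's default names; myproject and the application.properties file are illustrative:

- name: Create the service account the pod runs as
  command: oc create serviceaccount qs-camel-config -n myproject
- name: Grant the view role so Spring Cloud Kubernetes can read config objects
  command: oc policy add-role-to-user view -z qs-camel-config -n myproject
- name: Create the Secret mounted at /etc/secrets/camel-config
  command: oc create secret generic camel-config --from-file=application.properties -n myproject
- name: Create the ConfigMap referenced by SPRING_CLOUD_KUBERNETES_CONFIG_NAME
  command: oc create configmap camel-config --from-file=application.properties -n myproject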
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-drools-template.json
new file mode 100644
index 000000000..e54fa0d59
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-drools-template.json
@@ -0,0 +1,334 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+            "description": "Spring Boot, Camel and JBoss BRMS QuickStart. This example demonstrates how you can use Apache Camel and JBoss BRMS with Spring Boot on OpenShift. DRL files contain simple rules which are used to create a knowledge session via a Spring configuration file. Camel routes, also defined via Spring, are then used to, for example, pass (insert) the body of the message as a POJO to the Drools engine for execution.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-drools"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-drools"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-drools",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-drools.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-drools-1.0.0.redhat-000054",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "KIESERVER_SERVICE",
+ "displayName": "Decision Server Name",
+ "required": true,
+ "value": "kie-app",
+            "description": "Set this to the name of the Decision Server. You may need to create an instance first."
+ },
+ {
+ "name": "KIESERVER_USERNAME",
+ "displayName": "Decision Server Username",
+ "required": true,
+ "value": "kieserver",
+ "description": "The username used to authenticate with the Decision Server."
+ },
+ {
+ "name": "KIESERVER_PASSWORD",
+ "displayName": "Decision Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the Decision Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000054",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+            "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+            "description": "The maximum amount of CPU the container may use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "KIESERVER_SERVICE",
+ "value": "${KIESERVER_SERVICE}"
+ }, {
+ "name": "KIESERVER_USERNAME",
+ "value": "${KIESERVER_USERNAME}"
+ }, {
+ "name": "KIESERVER_PASSWORD",
+ "value": "${KIESERVER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-infinispan-template.json
new file mode 100644
index 000000000..20ba97dac
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-infinispan-template.json
@@ -0,0 +1,315 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+            "description": "Spring Boot, Camel and JBoss Data Grid QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a JBoss Data Grid (or Infinispan) server using the Hot Rod protocol. It requires that the data grid server (or cluster) has been deployed first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-infinispan"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-infinispan"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-infinispan",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-infinispan.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-infinispan-1.0.0.redhat-000024",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "INFINISPAN_SERVICE",
+ "displayName": "JBoss Data Grid Service (Hot Rod)",
+ "required": true,
+ "value": "datagrid-app-hotrod",
+ "description": "Set this to the name of the Hot Rod service of the JBoss Data Grid. You may need to create the data grid first."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000024",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+            "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+            "description": "The maximum amount of CPU the container may use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "INFINISPAN_SERVICE",
+ "value": "${INFINISPAN_SERVICE}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-rest-sql-template.json
new file mode 100644
index 000000000..555647fab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-rest-sql-template.json
@@ -0,0 +1,403 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+            "description": "Spring Boot, Camel REST DSL and MySQL QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a MySQL database and expose a REST API with Camel on OpenShift. In this example we use two containers, one running as a MySQL server and another as a client to the database, where the Camel routes run. This quickstart requires that the MySQL server be deployed and started first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-rest-sql.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-rest-sql-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "camel-rest-sql",
+ "description": "Exposed service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+            "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+            "description": "The maximum amount of CPU the container may use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8080,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
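Note on the health probes: each Spring Boot quickstart above probes /health on port 8081 rather than on the service port, which matches a Spring Boot 1.x actuator exposed on a dedicated management port. A sketch of the application.yml that would produce this layout, assuming the quickstarts follow that convention:

# keep actuator endpoints off the main HTTP port so probes bypass application traffic
management:
  port: 8081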
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-teiid-template.json
new file mode 100644
index 000000000..cf9a4e903
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-teiid-template.json
@@ -0,0 +1,343 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+            "description": "Spring Boot, Camel and JBoss Data Virtualization QuickStart. This example demonstrates how to connect Apache Camel to a remote JBoss Data Virtualization (or Teiid) Server using the JDBC protocol.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-teiid"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-teiid"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-teiid",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-teiid.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-teiid-1.0.0.redhat-000053",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "TEIID_SERVICE_NAME",
+ "displayName": "JDV Server Name",
+ "required": true,
+ "value": "datavirt-app",
+            "description": "Set this to the name of the JDV Server. You may need to create an instance first."
+ },
+ {
+ "name": "TEIID_PORT_NAME",
+ "displayName": "JDV Port Name",
+ "value": "jdbc",
+ "description": "Set this to the name of the JDV port to use. Set this value if the JDV service contains multiple named ports."
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "displayName": "JDV Server Username",
+ "required": true,
+ "description": "The username used to authenticate with the JDV Server."
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "displayName": "JDV Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the JDV Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000053",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+            "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+            "description": "The maximum amount of CPU the container may use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [
+ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "TEIID_SERVICE_NAME",
+ "value": "${TEIID_SERVICE_NAME}"
+ }, {
+ "name": "TEIID_PORT_NAME",
+ "value": "${TEIID_PORT_NAME}"
+ }, {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ }, {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ }],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-template.json
new file mode 100644
index 000000000..c78a96f7c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+            "description": "Spring Boot and Camel QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on OpenShift. The quickstart uses Spring Boot to configure a small application that includes a Camel route that triggers a message every five seconds and routes it to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
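Once the openshift_examples role has loaded a template such as s2i-spring-boot-camel into the openshift namespace, it can be instantiated with oc process (or oc new-app) from any project. A minimal sketch as an Ansible task, assuming an oc client that accepts -p for parameter overrides and a hypothetical project named myproject:

    - name: Instantiate the spring-boot-camel quickstart (sketch)
      shell: >
        oc process -n openshift s2i-spring-boot-camel -p APP_NAME=my-camel-app
        | oc create -n myproject -f -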
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-xml-template.json
new file mode 100644
index 000000000..620425902
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-camel-xml-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and Camel Xml QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a Camel route (in Spring xml) that triggeres a message every 5th second, and routes the message to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-xml"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-xml"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-xml",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-xml.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-xml-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
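Each of these BuildConfigs registers GitHub and Generic webhook triggers keyed by BUILD_SECRET, which the generate: expression parameter fills with a random 40-character token at processing time. A rebuild can then be requested by POSTing to the webhook endpoint. A sketch as an Ansible task, assuming the oapi webhook URL layout of this OpenShift generation and hypothetical host, project, and secret values:

    - name: Fire the generic webhook to start a rebuild (sketch)
      uri:
        url: "https://master.example.com:8443/oapi/v1/namespaces/myproject/buildconfigs/s2i-spring-boot-camel-xml/webhooks/MY_BUILD_SECRET/generic"
        method: POST
        validate_certs: no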
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxrs-template.json
new file mode 100644
index 000000000..15cfc93fd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxrs-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and CXF JAXRS QuickStart. This example demonstrates how you can use Apache CXF JAXRS with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a CXF JAXRS endpoint with Swagger enabled.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxrs",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxrs.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxrs-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxws-template.json
new file mode 100644
index 000000000..c70ee7726
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/xpaas-templates/spring-boot-cxf-jaxws-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and CXF JAXWS QuickStart. This example demonstrates how you can use Apache CXF JAXWS with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a CXF JAXWS endpoint.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxws"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxws"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxws",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxws.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxws-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
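The four spring-boot-* templates added above are structurally identical; only APP_NAME, GIT_REPO, GIT_REF, and the version defaults differ, so they can be registered as a batch. A sketch as an Ansible task, assuming the JSON files are available on the control host at the hypothetical path below:

    - name: Register the Spring Boot quickstart templates (sketch)
      command: oc create -n openshift -f {{ item }}
      with_fileglob:
        - "files/examples/v1.4/xpaas-templates/spring-boot-*.json"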
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json
index cfbfc3e20..f347f1f9f 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "MariaDB (Ephemeral)",
"description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mariadb",
- "tags": "database,mariadb"
+ "tags": "database,mariadb",
+ "template.openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json
index e933eecf0..6ed744777 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "MariaDB (Persistent)",
"description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mariadb",
- "tags": "database,mariadb"
+ "tags": "database,mariadb",
+ "template.openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json
index 8b8fcb58b..97a8abf6d 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "MongoDB (Ephemeral)",
"description": "MongoDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mongodb",
- "tags": "database,mongodb"
+ "tags": "database,mongodb",
+ "template.openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
@@ -196,7 +200,8 @@
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"name": "NAMESPACE",
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json
index 72d3a8556..0656219fb 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "MongoDB (Persistent)",
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
- "tags": "database,mongodb"
+ "tags": "database,mongodb",
+ "template.openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
@@ -213,7 +217,8 @@
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"name": "NAMESPACE",
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json
index 34dd2ed78..d60b4647d 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "MySQL (Ephemeral)",
"description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mysql-database",
- "tags": "database,mysql"
+ "tags": "database,mysql",
+ "template.openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
@@ -196,7 +200,8 @@
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"name": "NAMESPACE",
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json
index 85c48da01..c2bfa40fd 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "MySQL (Persistent)",
"description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
- "tags": "database,mysql"
+ "tags": "database,mysql",
+ "template.openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json
index 1025ab056..7a16e742a 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "PostgreSQL (Ephemeral)",
"description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-postgresql",
- "tags": "database,postgresql"
+ "tags": "database,postgresql",
+ "template.openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
@@ -186,7 +190,8 @@
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"name": "NAMESPACE",
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json
index 1968e727a..242212d6f 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "PostgreSQL (Persistent)",
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
- "tags": "database,postgresql"
+ "tags": "database,postgresql",
+ "template.openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
@@ -203,7 +207,8 @@
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"name": "NAMESPACE",
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json
index c9ae8a539..82a09a3ec 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "Redis (Ephemeral)",
"description": "Redis in-memory data structure store, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-redis",
- "tags": "database,redis"
+ "tags": "database,redis",
+ "template.openshift.io/long-description": "This template provides a standalone Redis server. The data is not stored on persistent storage, so any restart of the service will result in all data being lost.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
@@ -157,7 +161,8 @@
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"name": "NAMESPACE",
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json
index e9db9ec9d..1d5f59188 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "Redis (Persistent)",
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-redis",
- "tags": "database,redis"
+ "tags": "database,redis",
+ "template.openshift.io/long-description": "This template provides a standalone Redis server. The data is stored on persistent storage.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
@@ -174,7 +178,8 @@
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
- "value": "512Mi"
+ "value": "512Mi",
+ "required": true
},
{
"name": "NAMESPACE",
diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
index 9b19b8bd0..eb94c3bb4 100644
--- a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
@@ -241,7 +241,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.6"
+ "name": "7.0"
}
},
{
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json
index 0ba57864e..eb3d296be 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "CakePHP + MySQL (Persistent)",
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
"tags": "quickstart,php,cakephp",
- "iconClass": "icon-php"
+ "iconClass": "icon-php",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
@@ -129,7 +133,10 @@
"secret": "${GITHUB_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "./lib/Cake/Console/cake test app AllTests"
+ }
}
},
{
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json
index 9732e59e1..da2454d2e 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "CakePHP + MySQL (Ephemeral)",
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,php,cakephp",
- "iconClass": "icon-php"
+ "iconClass": "icon-php",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
@@ -129,7 +133,10 @@
"secret": "${GITHUB_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "./lib/Cake/Console/cake test app AllTests"
+ }
}
},
{
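The new postCommit block gives both CakePHP quickstarts a post-commit build hook: after each successful build the script runs inside a temporary container of the freshly built image, and the image is pushed only if the command exits cleanly, so failing tests block the deployment trigger. The same hook can be attached to an existing BuildConfig without reprocessing the template. A sketch as an Ansible task, assuming an oc client that provides oc set build-hook and the template's default BuildConfig name:

    - name: Attach the CakePHP test hook to an existing build config (sketch)
      command: >
        oc set build-hook bc/cakephp-mysql-example --post-commit
        --script="./lib/Cake/Console/cake test app AllTests"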
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json
index 074561550..81ae63416 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Dancer + MySQL (Persistent)",
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"tags": "quickstart,perl,dancer",
- "iconClass": "icon-perl"
+ "iconClass": "icon-perl",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/dancer-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json
index 18100974b..7a285dba8 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Dancer + MySQL (Ephemeral)",
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,perl,dancer",
- "iconClass": "icon-perl"
+ "iconClass": "icon-perl",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/dancer-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json
index b39771bd8..9f982c286 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Django + PostgreSQL (Persistent)",
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"tags": "quickstart,python,django",
- "iconClass": "icon-python"
+ "iconClass": "icon-python",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/django-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json
index 64b914e61..7bee85ddd 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Django + PostgreSQL (Ephemeral)",
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,python,django",
- "iconClass": "icon-python"
+ "iconClass": "icon-python",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/django-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json
index 62ccc5b7f..b0aef3cfc 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "Jenkins (Ephemeral)",
"description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"iconClass": "icon-jenkins",
- "tags": "instant-app,jenkins"
+ "tags": "instant-app,jenkins",
+      "template.openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based OAuth login. The Jenkins configuration is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json
index 50c4ad566..a542de219 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json
@@ -8,7 +8,11 @@
"openshift.io/display-name": "Jenkins (Persistent)",
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
- "tags": "instant-app,jenkins"
+ "tags": "instant-app,jenkins",
+      "template.openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based OAuth login.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json
index fecb84662..6ee999cb1 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Node.js + MongoDB (Persistent)",
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"tags": "quickstart,nodejs",
- "iconClass": "icon-nodejs"
+ "iconClass": "icon-nodejs",
+      "template.openshift.io/long-description": "This template defines resources needed to develop a Node.js application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json
index 6a55f0251..5c177a7e0 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Node.js + MongoDB (Ephemeral)",
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,nodejs",
- "iconClass": "icon-nodejs"
+ "iconClass": "icon-nodejs",
+      "template.openshift.io/long-description": "This template defines resources needed to develop a Node.js application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json
index 6c0a484b5..b400cfdb3 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Rails + PostgreSQL (Persistent)",
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"tags": "quickstart,ruby,rails",
- "iconClass": "icon-ruby"
+ "iconClass": "icon-ruby",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/rails-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
@@ -165,7 +169,7 @@
"imageChangeParams": {
"automatic": true,
"containerNames": [
- "rails-pgsql-persistent"
+ "${NAME}"
],
"from": {
"kind": "ImageStreamTag",
@@ -191,7 +195,7 @@
"spec": {
"containers": [
{
- "name": "rails-pgsql-persistent",
+ "name": "${NAME}",
"image": " ",
"ports": [
{
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json
index 043554c79..fa67412ff 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json
@@ -7,7 +7,11 @@
"openshift.io/display-name": "Rails + PostgreSQL (Ephemeral)",
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,ruby,rails",
- "iconClass": "icon-ruby"
+ "iconClass": "icon-ruby",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/rails-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
@@ -165,7 +169,7 @@
"imageChangeParams": {
"automatic": true,
"containerNames": [
- "rails-postgresql-example"
+ "${NAME}"
],
"from": {
"kind": "ImageStreamTag",
@@ -191,7 +195,7 @@
"spec": {
"containers": [
{
- "name": "rails-postgresql-example",
+ "name": "${NAME}",
"image": " ",
"ports": [
{
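
The two rails hunks above replace a hardcoded container name with ${NAME}. An ImageChange trigger only fires for containers listed in imageChangeParams.containerNames, so that list and the container's own name must expand from the same parameter, or the trigger silently stops matching whenever a user overrides NAME. A small illustration follows; it uses Python's string.Template purely because it shares the ${...} syntax, and is not how oc process is implemented.

    # Illustration: both the trigger's containerNames entry and the
    # container name expand from the same NAME parameter, so they can
    # never drift apart the way two hardcoded strings can.
    import json
    from string import Template

    snippet = '''
    {
      "imageChangeParams": {"containerNames": ["${NAME}"]},
      "container": {"name": "${NAME}"}
    }
    '''

    processed = json.loads(Template(snippet).substitute(NAME="rails-pgsql-persistent"))
    assert (processed["imageChangeParams"]["containerNames"][0]
            == processed["container"]["name"])
    print(processed)
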
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v1.5/xpaas-streams/fis-image-streams.json
index ed0e94bed..9d99973be 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-streams/fis-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-streams/fis-image-streams.json
@@ -20,12 +20,22 @@
{
"name": "1.0",
"annotations": {
- "description": "JBoss Fuse Integration Services 6.2.1 Java S2I images.",
+ "description": "JBoss Fuse Integration Services 1.0 Java S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,jboss-fuse,java,xpaas",
"supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
"version": "1.0"
}
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 2.0 Java S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,xpaas",
+ "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
+ "version": "2.0"
+ }
}
]
}
@@ -42,12 +52,22 @@
{
"name": "1.0",
"annotations": {
- "description": "JBoss Fuse Integration Services 6.2.1 Karaf S2I images.",
+ "description": "JBoss Fuse Integration Services 1.0 Karaf S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,jboss-fuse,java,karaf,xpaas",
"supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
"version": "1.0"
}
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 2.0 Karaf S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,karaf,xpaas",
+ "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
+ "version": "2.0"
+ }
}
]
}
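
Each new 2.0 tag above carries a "supports" annotation, a comma-separated list of capability:version pairs such as jboss-fuse:6.3.0,java:8,xpaas:1.2. A tiny parser sketch, assuming only the format visible in these hunks:

    # Sketch: split a "supports" annotation into capability -> version.
    def parse_supports(value):
        pairs = {}
        for item in value.split(","):
            name, _, version = item.partition(":")
            pairs[name] = version
        return pairs

    print(parse_supports("jboss-fuse:6.3.0,java:8,xpaas:1.2"))
    # {'jboss-fuse': '6.3.0', 'java': '8', 'xpaas': '1.2'}
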
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json
index a7cb12867..049f3f884 100644
--- a/roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json
@@ -367,6 +367,31 @@
}
]
}
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redhat-openjdk18-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat OpenJDK 8",
+ "description": "Build and run Java applications using Maven and OpenJDK 8.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,java,xpaas,openjdk",
+ "supports":"java:8,xpaas:1.0",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "sampleContextDir": "undertow-servlet",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
}
]
}
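
The new redhat-openjdk18-openshift stream simply mirrors an external repository, so each tag resolves to dockerImageRepository plus the tag name. A sketch of that mapping, using a trimmed copy of the definition added above:

    # Sketch: print the pull spec each ImageStream tag points at when
    # the stream mirrors an external repository via dockerImageRepository.
    import json

    image_stream = json.loads('''
    {
      "kind": "ImageStream",
      "metadata": {"name": "redhat-openjdk18-openshift"},
      "spec": {
        "dockerImageRepository": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift",
        "tags": [{"name": "1.0"}]
      }
    }
    ''')

    repo = image_stream["spec"]["dockerImageRepository"]
    for tag in image_stream["spec"]["tags"]:
        print("%s:%s" % (repo, tag["name"]))
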
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-amq-template.json
new file mode 100644
index 000000000..cd0bec3c1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-amq-template.json
@@ -0,0 +1,362 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Camel route using ActiveMQ in a Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-amq.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-amq-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-amq",
+      "description": "The name of the exposed service."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_USERNAME",
+ "value": "${ACTIVEMQ_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_PASSWORD",
+ "value": "${ACTIVEMQ_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
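
All of the karaf2 templates in this commit give the readiness probe a 10 second initial delay and the liveness probe 180 seconds, both against port 8181. Keeping liveness well behind readiness matters: if liveness fired first, the kubelet could restart a slow-starting Karaf container before it ever became ready. A lint sketch for that ordering, assuming the probe layout shown above:

    # Sketch: warn when a container's liveness probe could fire before
    # its readiness probe, using the stanza layout from the template above.
    import json

    def check_probes(container):
        ready = container.get("readinessProbe", {}).get("initialDelaySeconds", 0)
        live = container.get("livenessProbe", {}).get("initialDelaySeconds", 0)
        if live <= ready:
            print("warning: %s liveness delay (%ss) <= readiness delay (%ss)"
                  % (container.get("name", "?"), live, ready))

    container = json.loads('''
    {
      "name": "example",
      "readinessProbe": {"httpGet": {"path": "/readiness-check", "port": 8181},
                         "initialDelaySeconds": 10},
      "livenessProbe": {"httpGet": {"path": "/health-check", "port": 8181},
                        "initialDelaySeconds": 180}
    }
    ''')
    check_probes(container)
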
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-log-template.json
new file mode 100644
index 000000000..2ecce08a9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-log-template.json
@@ -0,0 +1,336 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "A simple Camel route in a Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-log"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-log"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-log",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-log.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-log-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-log",
+      "description": "The name of the exposed service."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
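
The BUILD_SECRET parameter above uses "generate": "expression" with the pattern [a-zA-Z0-9]{40}, which asks the template processor to mint a random 40-character alphanumeric webhook secret at processing time. A rough equivalent in plain Python; this is an illustration, not OpenShift's actual generator.

    # Sketch: emit a random 40-character [a-zA-Z0-9] string, analogous to
    # what the template's generate/from expression produces.
    import random
    import string

    def generate_secret(length=40, alphabet=string.ascii_letters + string.digits):
        rng = random.SystemRandom()  # OS-backed randomness, suitable for secrets
        return "".join(rng.choice(alphabet) for _ in range(length))

    print(generate_secret())
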
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-rest-sql-template.json
new file mode 100644
index 000000000..d80939efb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-camel-rest-sql-template.json
@@ -0,0 +1,421 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Camel example using the Rest DSL with a SQL database in a Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-rest-sql.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-rest-sql-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-rest",
+      "description": "The name of the exposed service."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "required": true,
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
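
Note that this template's BuildConfig triggers reference ${BUILD_SECRET}, but unlike the camel-amq and camel-log templates, the parameter list above never declares BUILD_SECRET, so the reference is left unexpanded at processing time. Mismatches like this are easy to catch mechanically; a lint sketch, stock Python with paths given on the command line:

    # Sketch: report ${...} references that have no matching entry in a
    # template's "parameters" list. Run against the file above, it flags
    # BUILD_SECRET, which the BuildConfig triggers use but the parameters
    # never declare.
    import json
    import re
    import sys

    def undeclared_parameters(path):
        with open(path) as f:
            text = f.read()
        declared = {p["name"] for p in json.loads(text).get("parameters", [])}
        referenced = set(re.findall(r"\$\{([A-Za-z0-9_]+)\}", text))
        return sorted(referenced - declared)

    if __name__ == "__main__":
        for path in sys.argv[1:]:
            for name in undeclared_parameters(path):
                print("%s: ${%s} is referenced but not declared" % (path, name))
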
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-cxf-rest-template.json
new file mode 100644
index 000000000..f99099868
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/karaf2-cxf-rest-template.json
@@ -0,0 +1,385 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "REST example using CXF in a Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-cxf-rest"
+ },
+ "labels": {
+ "template": "s2i-karaf2-cxf-rest"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-cxf-rest.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-cxf-rest-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-cxf-rest",
+      "description": "The name of the exposed service."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "java",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9412,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
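
The resource stanzas in these templates mix bare CPU numbers ("0.2", "1.0") with decimal-SI memory quantities ("1.5G", "2G"). A minimal decoder for just the suffixes that appear here; the full Kubernetes quantity grammar also includes binary suffixes such as Mi and Gi, which this sketch deliberately skips.

    # Sketch: decode the resource quantities used by these templates.
    SI = {"k": 10**3, "M": 10**6, "G": 10**9}

    def to_base_units(quantity):
        # "1.5G" -> 1.5e9 bytes; bare numbers like "0.2" are CPU cores.
        if quantity[-1] in SI:
            return float(quantity[:-1]) * SI[quantity[-1]]
        return float(quantity)

    assert to_base_units("1.5G") == 1.5e9   # MEMORY_REQUEST default
    assert to_base_units("2G") == 2e9       # MEMORY_LIMIT default
    assert to_base_units("0.2") == 0.2      # CPU_REQUEST default, in cores
    print("quantities parse as expected")
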
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/openjdk18-web-basic-s2i.json
new file mode 100644
index 000000000..143e16756
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/openjdk18-web-basic-s2i.json
@@ -0,0 +1,267 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for Java applications built using S2I.",
+ "tags": "java,xpaas",
+ "version": "1.0.0"
+ },
+ "name": "openjdk18-web-basic-s2i"
+ },
+ "labels": {
+ "template": "openjdk18-web-basic-s2i",
+ "xpaas": "1.0.0"
+ },
+  "message": "A new Java application has been created in your project.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "openjdk-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "undertow-servlet",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+      "displayName": "GitHub Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The application's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-openjdk18-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "env": [
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
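
This template generates 8-character webhook secrets ([a-zA-Z0-9]{8}), while the FIS templates in this commit generate 40-character BUILD_SECRET values. The difference is easy to quantify: each character drawn from a 62-symbol alphabet contributes log2(62) bits of entropy, roughly 5.95. A worked check:

    # Sketch: entropy of the generated webhook secrets, in bits.
    import math

    ALPHABET = 62  # [a-zA-Z0-9]

    def entropy_bits(length, alphabet=ALPHABET):
        return length * math.log2(alphabet)

    print("8-char secret:  %.1f bits" % entropy_bits(8))   # ~47.6
    print("40-char secret: %.1f bits" % entropy_bits(40))  # ~238.2
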
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-amq-template.json
new file mode 100644
index 000000000..8b3cd6ed0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-amq-template.json
@@ -0,0 +1,331 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring Boot, Camel and ActiveMQ QuickStart. This quickstart demonstrates how to connect a Spring Boot application to an ActiveMQ broker and use JMS messaging between two Camel routes on OpenShift. The example uses two containers: one runs as the ActiveMQ broker, and the other acts as a client to the broker and hosts the Camel routes. This quickstart requires that the ActiveMQ broker is already deployed and running.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-amq.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-amq-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "value": "${ACTIVEMQ_BROKER_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "value": "${ACTIVEMQ_BROKER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
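A minimal usage sketch for the template above. The file name, application name, and parameter values are illustrative, and the ActiveMQ broker referenced by ACTIVEMQ_SERVICE_NAME must already be running in the project:

    # Process the template file locally, overriding selected parameters, and
    # create the resulting objects; omitted parameters keep their defaults
    # (BUILD_SECRET is generated automatically).
    oc process -f spring-boot-camel-amq-template.json \
        -p APP_NAME=my-camel-amq \
        -p ACTIVEMQ_SERVICE_NAME=broker-amq-tcp \
        | oc create -f -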
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-config-template.json
new file mode 100644
index 000000000..bc5bbad22
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-config-template.json
@@ -0,0 +1,327 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring Boot and Camel using ConfigMaps and Secrets. This quickstart demonstrates how to configure a Spring Boot application using OpenShift ConfigMaps and Secrets.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-config"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-config"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-config",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-config.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-config-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_ACCOUNT_NAME",
+ "displayName": "Service Account",
+ "value": "qs-camel-config",
+ "required": true,
+      "description": "The Service Account that will be used to run the container. It must already be present in OpenShift and have the view role."
+ },
+ {
+ "name": "SECRET_NAME",
+ "displayName": "Secret Name",
+ "value": "camel-config",
+ "required": true,
+      "description": "The name of the OpenShift Secret that will be used to configure the application. It must already be present in OpenShift."
+ },
+ {
+ "name": "CONFIGMAP_NAME",
+ "displayName": "ConfigMap Name",
+ "value": "camel-config",
+ "required": true,
+      "description": "The name of the OpenShift ConfigMap that will be used to configure the application. It must already be present in OpenShift."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "volumes": [
+ {
+ "name": "camel-config",
+ "secret": {
+ "secretName": "${SECRET_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_SECRETS_NAME",
+ "value": "${SECRET_NAME}"
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_CONFIG_NAME",
+ "value": "${CONFIGMAP_NAME}"
+ } ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "camel-config",
+ "readOnly": true,
+ "mountPath": "/etc/secrets/camel-config"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
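The config template above requires its service account, Secret, and ConfigMap to exist before the application is deployed. A minimal preparation sketch, assuming the default parameter values and an illustrative properties file name:

    # Service account the pod runs as; per the template description it needs
    # the view role so the application can read its config via the API.
    oc create serviceaccount qs-camel-config
    oc policy add-role-to-user view -z qs-camel-config
    # Secret and ConfigMap consumed by the application; the names match the
    # SECRET_NAME and CONFIGMAP_NAME defaults.
    oc create secret generic camel-config --from-file=application.properties
    oc create configmap camel-config --from-file=application.properties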
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-drools-template.json
new file mode 100644
index 000000000..e54fa0d59
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-drools-template.json
@@ -0,0 +1,334 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring Boot, Camel and JBoss BRMS QuickStart. This example demonstrates how you can use Apache Camel and JBoss BRMS with Spring Boot on OpenShift. DRL files contain simple rules that are used to create a knowledge session via a Spring configuration file. Camel routes, also defined via Spring, are then used to, for example, pass (insert) the body of the message as a POJO to the Drools engine for execution.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-drools"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-drools"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-drools",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-drools.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-drools-1.0.0.redhat-000054",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "KIESERVER_SERVICE",
+ "displayName": "Decision Server Name",
+ "required": true,
+ "value": "kie-app",
+      "description": "Set this to the name of the Decision Server. You may need to create an instance first."
+ },
+ {
+ "name": "KIESERVER_USERNAME",
+ "displayName": "Decision Server Username",
+ "required": true,
+ "value": "kieserver",
+ "description": "The username used to authenticate with the Decision Server."
+ },
+ {
+ "name": "KIESERVER_PASSWORD",
+ "displayName": "Decision Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the Decision Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000054",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+      "description": "The maximum amount of CPU the container is allowed to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "KIESERVER_SERVICE",
+ "value": "${KIESERVER_SERVICE}"
+ }, {
+ "name": "KIESERVER_USERNAME",
+ "value": "${KIESERVER_USERNAME}"
+ }, {
+ "name": "KIESERVER_PASSWORD",
+ "value": "${KIESERVER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-infinispan-template.json
new file mode 100644
index 000000000..20ba97dac
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-infinispan-template.json
@@ -0,0 +1,315 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring Boot, Camel and JBoss Data Grid QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a JBoss Data Grid (or Infinispan) server using the Hot Rod protocol. It requires that the data grid server (or cluster) has been deployed first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-infinispan"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-infinispan"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-infinispan",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-infinispan.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-infinispan-1.0.0.redhat-000024",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "INFINISPAN_SERVICE",
+ "displayName": "JBoss Data Grid Service (Hot Rod)",
+ "required": true,
+ "value": "datagrid-app-hotrod",
+ "description": "Set this to the name of the Hot Rod service of the JBoss Data Grid. You may need to create the data grid first."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000024",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+      "description": "The maximum amount of CPU the container is allowed to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "INFINISPAN_SERVICE",
+ "value": "${INFINISPAN_SERVICE}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
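Every template in this set documents its inputs in the "parameters" array; the parameter table can be inspected without creating anything, which is useful given how many knobs these quickstarts share. A sketch (file name illustrative):

    # Print the parameters a template accepts, including defaults, generators,
    # and which ones are required.
    oc process --parameters -f spring-boot-camel-infinispan-template.json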
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-rest-sql-template.json
new file mode 100644
index 000000000..555647fab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-rest-sql-template.json
@@ -0,0 +1,403 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring Boot, Camel REST DSL and MySQL QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a MySQL database and expose a REST API with Camel on OpenShift. This example uses two containers: one that runs as a MySQL server, and another that acts as a client to the database and runs the Camel routes. This quickstart requires the MySQL server to be deployed and started first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-rest-sql.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-rest-sql-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "camel-rest-sql",
+ "description": "Exposed service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+      "description": "The maximum amount of CPU the container is allowed to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8080,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
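The BUILD_SECRET parameter above is filled in at processing time by OpenShift's "generate": "expression" mechanism, which produces a random string matching the "from" pattern [a-zA-Z0-9]{40}; the same value is wired into both the GitHub and Generic webhook triggers. A hedged sketch of firing the generic webhook (master host, project, app name, and secret are placeholders):

    # POST to the generic webhook of the BuildConfig to start a build;
    # <secret> is the processed BUILD_SECRET value stored in the trigger.
    curl -k -X POST \
        "https://<master>:8443/oapi/v1/namespaces/<project>/buildconfigs/<app-name>/webhooks/<secret>/generic"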
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-teiid-template.json
new file mode 100644
index 000000000..cf9a4e903
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-teiid-template.json
@@ -0,0 +1,343 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring Boot, Camel and JBoss Data Virtualization QuickStart. This example demonstrates how to connect Apache Camel to a remote JBoss Data Virtualization (or Teiid) server using the JDBC protocol.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-teiid"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-teiid"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-teiid",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-teiid.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-teiid-1.0.0.redhat-000053",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "TEIID_SERVICE_NAME",
+ "displayName": "JDV Server Name",
+ "required": true,
+ "value": "datavirt-app",
+      "description": "Set this to the name of the JDV Server. You may need to create an instance first."
+ },
+ {
+ "name": "TEIID_PORT_NAME",
+ "displayName": "JDV Port Name",
+ "value": "jdbc",
+      "description": "Set this to the name of the JDV port to use when the JDV service contains multiple named ports."
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "displayName": "JDV Server Username",
+ "required": true,
+ "description": "The username used to authenticate with the JDV Server."
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "displayName": "JDV Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the JDV Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000053",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+      "description": "The maximum amount of CPU the container is allowed to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [
+ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "TEIID_SERVICE_NAME",
+ "value": "${TEIID_SERVICE_NAME}"
+ }, {
+ "name": "TEIID_PORT_NAME",
+ "value": "${TEIID_PORT_NAME}"
+ }, {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ }, {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ }],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-template.json
new file mode 100644
index 000000000..c78a96f7c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring Boot and Camel QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on OpenShift. The quickstart uses Spring Boot to configure a small application that includes a Camel route that triggers a message every 5 seconds and routes it to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+      "description": "The maximum amount of CPU the container is allowed to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
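The readiness and liveness probes in these templates poll Spring Boot Actuator's /health endpoint on port 8081; that port does not need to appear under "ports" (only Jolokia's 8778 is declared) because Kubernetes probes may target any container port. A quick manual check against a running pod (pod name is a placeholder):

    # Forward the management port locally and query the endpoint the probes use.
    oc port-forward <pod-name> 8081:8081 &
    curl http://127.0.0.1:8081/health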
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-xml-template.json
new file mode 100644
index 000000000..620425902
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-camel-xml-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring Boot and Camel XML QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on OpenShift. The quickstart uses Spring Boot to configure a small application that includes a Camel route (defined in Spring XML) that triggers a message every 5 seconds and routes it to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-xml"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-xml"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-xml",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-xml.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-xml-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+      "description": "The amount of CPU to request."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+      "description": "The maximum amount of CPU the container is allowed to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxrs-template.json
new file mode 100644
index 000000000..15cfc93fd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxrs-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+      "description": "Spring Boot and CXF JAX-RS QuickStart. This example demonstrates how you can use Apache CXF JAX-RS with Spring Boot on OpenShift. The quickstart uses Spring Boot to configure a small application that includes a CXF JAX-RS endpoint with Swagger enabled.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxrs",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxrs.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxrs-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
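
Both this JAXRS template and the near-identical JAXWS one below are consumed by substituting every ${NAME} placeholder from the "parameters" list: caller-supplied overrides win, otherwise the declared "value" applies, and BUILD_SECRET is generated from its "from" expression. A minimal Python sketch of that resolution, assuming the template file sits in the working directory and ignoring "generate": "expression" handling (real processing is done by OpenShift itself, e.g. via `oc process`):

    import json
    import re

    def process_template(template, overrides=None):
        """Toy ${NAME} resolver; parameters without a value fall back to ''."""
        overrides = overrides or {}
        params = {p["name"]: overrides.get(p["name"], p.get("value", ""))
                  for p in template.get("parameters", [])}
        raw = json.dumps(template["objects"])
        resolved = re.sub(r"\$\{(\w+)\}",
                          lambda m: params.get(m.group(1), m.group(0)),
                          raw)
        return json.loads(resolved)

    with open("spring-boot-cxf-jaxrs-template.json") as f:
        objects = process_template(json.load(f), {"APP_NAME": "my-jaxrs-app"})
    print(objects[0]["metadata"]["name"])  # my-jaxrs-app
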
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxws-template.json
new file mode 100644
index 000000000..c70ee7726
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/spring-boot-cxf-jaxws-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and CXF JAXWS QuickStart. This example demonstrates how you can use Apache CXF JAXWS with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a CXF JAXWS endpoint.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxws"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxws"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxws",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxws.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxws-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_excluder/tasks/status.yml b/roles/openshift_excluder/tasks/status.yml
index 6ef4af22d..ef118d94c 100644
--- a/roles/openshift_excluder/tasks/status.yml
+++ b/roles/openshift_excluder/tasks/status.yml
@@ -20,6 +20,7 @@
- name: Update to latest excluder packages
package:
name: "{{ openshift.common.service_type }}-excluder"
+ state: latest
when:
- "{{ openshift_excluder_installed.installed_versions | default([]) | length > 0 }}"
- not openshift.common.is_containerized | bool
@@ -27,6 +28,7 @@
- name: Update to the latest docker-excluder packages
package:
name: "{{ openshift.common.service_type }}-docker-excluder"
+ state: latest
when:
- "{{ docker_excluder_installed.installed_versions | default([]) | length > 0 }}"
- not openshift.common.is_containerized | bool
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 23a2bbcde..75b55c369 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -882,7 +882,7 @@ def set_version_facts_if_unset(facts):
if 'common' in facts:
deployment_type = facts['common']['deployment_type']
openshift_version = get_openshift_version(facts)
- if openshift_version:
+ if openshift_version and openshift_version != "latest":
version = LooseVersion(openshift_version)
facts['common']['version'] = openshift_version
facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
@@ -893,7 +893,7 @@ def set_version_facts_if_unset(facts):
version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
- version_gte_3_6_or_1_6 = version >= LooseVersion('1.6.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0') or version >= LooseVersion('1.6.0')
else:
version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
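
Two things motivate this hunk: openshift_version may now be the literal string "latest", which LooseVersion cannot meaningfully compare, and enterprise installs report 3.x versions where origin reports 1.x. A self-contained sketch of how the gate evaluates (note a 3.6.0 version also satisfies the 1.6.0 floor, so either side of the `or` can trip it):

    from distutils.version import LooseVersion

    for openshift_version in ("1.6.0", "3.6.0", "latest"):
        if openshift_version and openshift_version != "latest":
            version = LooseVersion(openshift_version)
            gte_3_6_or_1_6 = (version >= LooseVersion('3.6.0') or
                              version >= LooseVersion('1.6.0'))
            print(openshift_version, gte_3_6_or_1_6)   # True for both versions
        else:
            print(openshift_version, "skipped: no comparable version facts set")
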
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 7eb879140..0ec294bbc 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -1,66 +1,64 @@
---
-- block:
- - name: Detecting Operating System
- stat:
- path: /run/ostree-booted
- register: ostree_booted
+- name: Detecting Operating System
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
- # Locally setup containerized facts for now
- - name: Set l_is_atomic
- set_fact:
- l_is_atomic: "{{ ostree_booted.stat.exists }}"
- - name: Set other local variables
- set_fact:
- l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
- l_is_openvswitch_system_container: "{{ (use_openvswitch_system_container | default(use_system_containers) | bool) }}"
- l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}"
- l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}"
- l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
+# Locally set up containerized facts for now
+- set_fact:
+ l_is_atomic: "{{ ostree_booted.stat.exists }}"
+- set_fact:
+ l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+ l_is_openvswitch_system_container: "{{ (use_openvswitch_system_container | default(use_system_containers) | bool) }}"
+ l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}"
+ l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}"
+ l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
- - name: Ensure various deps are installed
- package: name={{ item }} state=present
- with_items: "{{ required_packages }}"
- when: not l_is_atomic | bool
+- name: Validate python version
+ fail:
+ msg: |
+ openshift-ansible requires Python 3 for {{ ansible_distribution }};
+ For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
+ when: ansible_distribution == 'Fedora' and ansible_python['version']['major'] != 3
- - name: Gather Cluster facts and set is_containerized if needed
- openshift_facts:
- role: common
- local_facts:
- debug_level: "{{ openshift_debug_level | default(2) }}"
- # TODO: Deprecate deployment_type in favor of openshift_deployment_type
- deployment_type: "{{ openshift_deployment_type | default(deployment_type) }}"
- deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
- cluster_id: "{{ openshift_cluster_id | default('default') }}"
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- is_containerized: "{{ l_is_containerized | default(None) }}"
- is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
- is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
- is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
- is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
- system_images_registry: "{{ system_images_registry | default('') }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
- http_proxy: "{{ openshift_http_proxy | default(None) }}"
- https_proxy: "{{ openshift_https_proxy | default(None) }}"
- no_proxy: "{{ openshift_no_proxy | default(None) }}"
- generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
- no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
- sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
- use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
+- name: Validate python version
+ fail:
+ msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
+ when: ansible_distribution != 'Fedora' and ansible_python['version']['major'] != 2
- - name: Set repoquery command
- set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+- name: Ensure various deps are installed
+ package: name={{ item }} state=present
+ with_items: "{{ required_packages }}"
+ when: not l_is_atomic | bool
- # This `when` allows us to skip this expensive block of tasks on
- # subsequent calls to the `openshift_facts` role. You will notice
- # speed-ups in proportion to the size of your cluster as this will
- # skip all tasks on the next calls to the `openshift_facts` role.
- when:
- - openshift_facts_init is not defined
+- name: Gather Cluster facts and set is_containerized if needed
+ openshift_facts:
+ role: common
+ local_facts:
+ debug_level: "{{ openshift_debug_level | default(2) }}"
+ # TODO: Deprecate deployment_type in favor of openshift_deployment_type
+ deployment_type: "{{ openshift_deployment_type | default(deployment_type) }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ cluster_id: "{{ openshift_cluster_id | default('default') }}"
+ hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ is_containerized: "{{ l_is_containerized | default(None) }}"
+ is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
+ is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
+ is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
+ is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+ system_images_registry: "{{ system_images_registry | default('') }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+ no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+ sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
+ use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
-- name: Record that openshift_facts has initialized
+- name: Set repoquery command
set_fact:
- openshift_facts_init: true
+ repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
diff --git a/roles/openshift_health_checker/HOWTO_CHECKS.md b/roles/openshift_health_checker/HOWTO_CHECKS.md
index 1573c14da..6c5662a4e 100644
--- a/roles/openshift_health_checker/HOWTO_CHECKS.md
+++ b/roles/openshift_health_checker/HOWTO_CHECKS.md
@@ -10,7 +10,7 @@ Checks are typically implemented as two parts:
2. a custom Ansible module in [library/](library), for cases when the modules
shipped with Ansible do not provide the required functionality.
-The checks are called from an Ansible playbooks via the `openshift_health_check`
+The checks are called from Ansible playbooks via the `openshift_health_check`
action plugin. See
[playbooks/byo/openshift-preflight/check.yml](../../playbooks/byo/openshift-preflight/check.yml)
for an example.
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index c31242624..2c70438c9 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -50,6 +50,7 @@ class OpenShiftCheck(object):
@classmethod
def subclasses(cls):
"""Returns a generator of subclasses of this class and its subclasses."""
+ # AUDIT: no-member makes sense due to this having a metaclass
for subclass in cls.__subclasses__(): # pylint: disable=no-member
yield subclass
for subclass in subclass.subclasses():
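
For readers auditing the `no-member` disable: `__subclasses__()` is supplied per-class by the (meta)class machinery at runtime, which pylint cannot verify statically here. The traversal itself is a plain depth-first walk; a standalone sketch of the same pattern, with demo classes standing in for the real checks:

    class OpenShiftCheckDemo(object):
        """Stand-in base; the real OpenShiftCheck adds more machinery."""
        @classmethod
        def subclasses(cls):
            for subclass in cls.__subclasses__():
                yield subclass
                for nested in subclass.subclasses():
                    yield nested

    class DiskChecksDemo(OpenShiftCheckDemo):
        pass

    class EtcdDiskDemo(DiskChecksDemo):
        pass

    print([c.__name__ for c in OpenShiftCheckDemo.subclasses()])
    # ['DiskChecksDemo', 'EtcdDiskDemo']
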
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 4029fba62..657e15160 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -2,17 +2,14 @@
from openshift_checks import get_var
-class NotContainerized(object):
+class NotContainerizedMixin(object):
"""Mixin for checks that are only active when not in containerized mode."""
@classmethod
def is_active(cls, task_vars):
return (
- # This mixin is meant to be used with subclasses of
- # OpenShiftCheck. Pylint disables this by default on mixins,
- # though it relies on the class name ending in 'mixin'.
- # pylint: disable=no-member
- super(NotContainerized, cls).is_active(task_vars) and
+ # This mixin is meant to be used with subclasses of OpenShiftCheck.
+ super(NotContainerizedMixin, cls).is_active(task_vars) and
not cls.is_containerized(task_vars)
)
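
The rename lets pylint recognize the class as a mixin from its `-Mixin` suffix, so the inline disable can be dropped. The cooperative `super()` call only resolves to `OpenShiftCheck.is_active` because concrete checks list the mixin first in their bases, as the three checks below do. A stub sketch of that MRO behaviour, with demo classes standing in for the real ones:

    class CheckBaseDemo(object):
        @classmethod
        def is_active(cls, task_vars):
            return True  # stand-in for OpenShiftCheck.is_active

    class NotContainerizedMixinDemo(object):
        @classmethod
        def is_active(cls, task_vars):
            # MRO: DemoCheck -> this mixin -> CheckBaseDemo, so super() hits the base
            return (super(NotContainerizedMixinDemo, cls).is_active(task_vars) and
                    not task_vars.get("containerized", False))

    class DemoCheck(NotContainerizedMixinDemo, CheckBaseDemo):
        pass

    print(DemoCheck.is_active({"containerized": False}))  # True
    print(DemoCheck.is_active({"containerized": True}))   # False
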
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 8faeef5ee..771123d61 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -1,9 +1,9 @@
# pylint: disable=missing-docstring
from openshift_checks import OpenShiftCheck, get_var
-from openshift_checks.mixins import NotContainerized
+from openshift_checks.mixins import NotContainerizedMixin
-class PackageAvailability(NotContainerized, OpenShiftCheck):
+class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
"""Check that required RPM packages are available."""
name = "package_availability"
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index 86b7b6245..c5a226954 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -1,9 +1,9 @@
# pylint: disable=missing-docstring
from openshift_checks import OpenShiftCheck
-from openshift_checks.mixins import NotContainerized
+from openshift_checks.mixins import NotContainerizedMixin
-class PackageUpdate(NotContainerized, OpenShiftCheck):
+class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
"""Check that there are no conflicts in RPM packages."""
name = "package_update"
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 7fa09cbfd..2e9d07deb 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -1,9 +1,9 @@
# pylint: disable=missing-docstring
from openshift_checks import OpenShiftCheck, get_var
-from openshift_checks.mixins import NotContainerized
+from openshift_checks.mixins import NotContainerizedMixin
-class PackageVersion(NotContainerized, OpenShiftCheck):
+class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
"""Check that available RPM packages match the required versions."""
name = "package_version"
diff --git a/roles/openshift_hosted/filter_plugins/filters.py b/roles/openshift_hosted/filter_plugins/filters.py
new file mode 100644
index 000000000..cbfadfe9d
--- /dev/null
+++ b/roles/openshift_hosted/filter_plugins/filters.py
@@ -0,0 +1,35 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use in openshift_hosted
+'''
+
+
+class FilterModule(object):
+ ''' Custom ansible filters for use by openshift_hosted role'''
+
+ @staticmethod
+ def get_router_replicas(replicas=None, router_nodes=None):
+        ''' Return the number of router replicas to use:
+            the explicitly defined openshift.hosted.router.replicas,
+            else the number of nodes matched by the oc_obj query
+            on openshift nodes with the router selector,
+            else a default of 1
+
+        '''
+ # We always use what they've specified if they've specified a value
+ if replicas is not None:
+ return replicas
+
+ if (isinstance(router_nodes, dict) and
+ 'results' in router_nodes and
+ 'results' in router_nodes['results'] and
+                'items' in router_nodes['results']['results'][0]):
+
+ return len(router_nodes['results']['results'][0]['items'])
+
+ return 1
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {'get_router_replicas': self.get_router_replicas}
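
A quick usage sketch for the filter, with a hand-built router_nodes dict shaped like the oc_obj list result it consumes (the FilterModule class above is assumed to be in scope):

    # Mimics the structure registered by `oc_obj: state=list, kind=node`.
    router_nodes = {"results": {"results": [{"items": [{}, {}, {}]}]}}

    f = FilterModule()
    print(f.get_router_replicas(replicas=2))                 # 2: explicit value wins
    print(f.get_router_replicas(replicas=None,
                                router_nodes=router_nodes))  # 3: one per matching node
    print(f.get_router_replicas())                           # 1: fallback default
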
diff --git a/roles/openshift_hosted/handlers/main.yml b/roles/openshift_hosted/handlers/main.yml
deleted file mode 100644
index e69de29bb..000000000
--- a/roles/openshift_hosted/handlers/main.yml
+++ /dev/null
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index ced71bb41..e9b590550 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: OpenShift Embedded Router
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.9
+ min_ansible_version: 2.1
platforms:
- name: EL
versions:
@@ -20,6 +20,7 @@ dependencies:
- role: openshift_serviceaccounts
openshift_serviceaccounts_names:
- router
+ - registry
openshift_serviceaccounts_namespace: default
openshift_serviceaccounts_sccs:
- hostnetwork
diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml
index 67c6bbfd7..fe254f72d 100644
--- a/roles/openshift_hosted/tasks/main.yml
+++ b/roles/openshift_hosted/tasks/main.yml
@@ -1,25 +1,6 @@
---
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- set_fact:
- openshift_hosted_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_kubeconfig }}
- changed_when: False
-
- include: router/router.yml
when: openshift_hosted_manage_router | default(true) | bool
- include: registry/registry.yml
when: openshift_hosted_manage_registry | default(true) | bool
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 93b701ebc..39e7de230 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -1,64 +1,105 @@
---
-- name: Retrieve list of openshift nodes matching registry selector
- command: >
- {{ openshift.common.client_binary }} --api-version='v1' -o json
- get nodes -n default --config={{ openshift_hosted_kubeconfig }}
- --selector={{ openshift.hosted.registry.selector | default('') }}
- register: registry_nodes_json
- changed_when: false
- when: openshift.hosted.registry.replicas | default(none) is none
+- block:
-- set_fact:
- l_node_count: "{{ (registry_nodes_json.stdout | default('{\"items\":[]}') | from_json)['items'] | length }}"
+ - name: Retrieve list of openshift nodes matching registry selector
+ oc_obj:
+ state: list
+ kind: node
+ selector: "{{ openshift.hosted.registry.selector | default(omit) }}"
+ register: registry_nodes
-# Determine the default number of registry/router replicas to use if no count
-# has been specified.
-# If no registry nodes defined, the default should be 0.
-- set_fact:
- l_default_replicas: 0
- when: l_node_count | int == 0
+  - name: set_fact l_node_count to the number of nodes matching the registry selector
+ set_fact:
+ l_node_count: "{{ registry_nodes.results.results[0]['items'] | length }}"
-# If registry nodes are defined and the registry storage kind is
-# defined, default should be the number of registry nodes, otherwise
-# just 1:
-- set_fact:
- l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}"
- when: l_node_count | int > 0
+ # Determine the default number of registry/router replicas to use if no count
+ # has been specified.
+ # If no registry nodes defined, the default should be 0.
+ - name: set_fact l_default_replicas when l_node_count == 0
+ set_fact:
+ l_default_replicas: 0
+ when: l_node_count | int == 0
-- set_fact:
- replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}"
+ # If registry nodes are defined and the registry storage kind is
+ # defined, default should be the number of registry nodes, otherwise
+ # just 1:
+ - name: set_fact l_default_replicas when l_node_count > 0
+ set_fact:
+ l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}"
+ when: l_node_count | int > 0
-- name: Create OpenShift registry
- command: >
- {{ openshift.common.client_binary }} adm registry --create
- --config={{ openshift_hosted_kubeconfig }}
- {% if replicas > 1 -%}
- --replicas={{ replicas }}
- {% endif -%}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- --service-account=registry
- {% if openshift.hosted.registry.selector | default(none) is not none -%}
- --selector='{{ openshift.hosted.registry.selector }}'
- {% endif -%}
- {% if not openshift.common.version_gte_3_2_or_1_2 | bool -%}
- --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig
- {% endif -%}
- {% if openshift.hosted.registry.registryurl | default(none) is not none -%}
- --images='{{ openshift.hosted.registry.registryurl }}'
- {% endif -%}
- register: openshift_hosted_registry_results
- changed_when: "'service exists' not in openshift_hosted_registry_results.stdout"
- failed_when: "openshift_hosted_registry_results.rc != 0 and 'service exists' not in openshift_hosted_registry_results.stdout and 'deployment_config' not in openshift_hosted_registry_results.stderr and 'service' not in openshift_hosted_registry_results.stderr"
- when: replicas | int > 0
+ when: openshift.hosted.registry.replicas | default(none) is none
+
+- name: Set openshift_hosted facts
+ set_fact:
+ openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}"
+ openshift_hosted_registry_name: docker-registry
+ openshift_hosted_registry_serviceaccount: registry
+ openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}"
+    openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}') }}"
+ openshift_hosted_registry_volumes: []
+ openshift_hosted_registry_env_vars: {}
+ openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routecertificates, {}) }}"
+ openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
+ openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
+ openshift_hosted_registry_edits:
+ # These edits are being specified only to prevent 'changed' on rerun
+ - key: spec.strategy.rollingParams
+ value:
+ intervalSeconds: 1
+ maxSurge: "25%"
+ maxUnavailable: "25%"
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ action: put
+ openshift_hosted_registry_force:
+ - False
+
+- name: Create the default registry service
+ oc_service:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: "{{ openshift_hosted_registry_name }}"
+ ports:
+ - name: 5000-tcp
+ port: 5000
+ protocol: TCP
+ targetPort: 5000
+ selector: "{{ openshift_hosted_registry_selector }}"
+ session_affinity: ClientIP
+ service_type: ClusterIP
- include: secure.yml
static: no
- when: replicas | int > 0 and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+ run_once: true
+ when:
+ - not (openshift.docker.hosted_registry_insecure | default(false) | bool)
- include: storage/object_storage.yml
static: no
- when: replicas | int > 0 and openshift.hosted.registry.storage.kind | default(none) == 'object'
+ when:
+ - openshift.hosted.registry.storage.kind | default(none) == 'object'
-- include: storage/persistent_volume.yml
- static: no
- when: replicas | int > 0 and openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack']
+- name: Update openshift_hosted facts for persistent volumes
+ set_fact:
+ openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(pvc_volume_mounts) }}"
+ vars:
+ pvc_volume_mounts:
+ - name: registry-storage
+ type: persistentVolumeClaim
+ claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
+ when:
+ - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack']
+
+- name: Create OpenShift registry
+ oc_adm_registry:
+ name: "{{ openshift_hosted_registry_name }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ selector: "{{ openshift_hosted_registry_selector }}"
+ replicas: "{{ openshift_hosted_registry_replicas }}"
+ service_account: "{{ openshift_hosted_registry_serviceaccount }}"
+ images: "{{ openshift_hosted_registry_images }}"
+ env_vars: "{{ openshift_hosted_registry_env_vars }}"
+ volume_mounts: "{{ openshift_hosted_registry_volumes }}"
+ edits: "{{ openshift_hosted_registry_edits }}"
+ force: "{{ True|bool in openshift_hosted_registry_force }}"
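
The `force` expression deserves a gloss: openshift_hosted_registry_force starts life as [False], and the secure.yml and object-storage tasks below union in the `changed` flag of each registered prerequisite task, so `True|bool in openshift_hosted_registry_force` reads as "did any prerequisite change". Roughly, in Python:

    def union(a, b):
        """Approximates Jinja2's `union` filter: unique items from both lists."""
        return list(set(a) | set(b))

    force = [False]                   # initial openshift_hosted_registry_force
    force = union(force, [True])      # e.g. server_cert_out.changed
    force = union(force, [False])     # e.g. the secret task did not change
    print(True in force)              # True -> oc_adm_registry runs with force=true
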
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index 8b44b94c6..bd513b943 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -1,132 +1,105 @@
---
-- name: Create passthrough route for docker-registry
+- name: Set fact docker_registry_route_hostname
+ set_fact:
+ docker_registry_route_hostname: "{{ 'docker-registry-default.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+
+- name: Get the certificate contents for registry
+ copy:
+ backup: True
+ dest: "/etc/origin/master/named_certificates/{{ item.value | basename }}"
+ src: "{{ item.value }}"
+  when: item.key in ['certfile', 'keyfile', 'cafile'] and item.value is not none
+ with_dict: "{{ openshift_hosted_registry_routecertificates }}"
+
+# When certificates are defined we will create the reencrypt
+# docker-registry route
+- name: Create a reencrypt route for docker-registry
oc_route:
- kubeconfig: "{{ openshift_hosted_kubeconfig }}"
name: docker-registry
- namespace: default
+ namespace: "{{ openshift_hosted_registry_namespace }}"
service_name: docker-registry
- state: present
- tls_termination: passthrough
- run_once: true
+ tls_termination: "{{ openshift_hosted_registry_routetermination }}"
+ host: "{{ openshift_hosted_registry_routehost | default(docker_registry_route_hostname) }}"
+ cert_path: "/etc/origin/master/named_certificates/{{ openshift_hosted_registry_routecertificates['certfile'] | basename }}"
+ key_path: "/etc/origin/master/named_certificates/{{ openshift_hosted_registry_routecertificates['keyfile'] | basename }}"
+ cacert_path: "/etc/origin/master/named_certificates/{{ openshift_hosted_registry_routecertificates['cafile'] | basename }}"
+ dest_cacert_path: /etc/origin/master/ca.crt
+ when:
+ - "'cafile' in openshift_hosted_registry_routecertificates"
+ - "'certfile' in openshift_hosted_registry_routecertificates"
+ - "'keyfile' in openshift_hosted_registry_routecertificates"
-- name: Determine if registry certificate must be created
- stat:
- path: "{{ openshift_master_config_dir }}/{{ item }}"
- with_items:
- - registry.crt
- - registry.key
- register: docker_registry_certificates_stat_result
- changed_when: false
- failed_when: false
+# When routetermination is passthrough we will create the route
+- name: Create passthrough route for docker-registry
+ oc_route:
+ name: docker-registry
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ service_name: docker-registry
+ tls_termination: "{{ openshift_hosted_registry_routetermination }}"
+ host: "{{ openshift_hosted_registry_routehost | ternary(openshift_hosted_registry_routehost, docker_registry_route_hostname) }}"
+ when: openshift_hosted_registry_routetermination == 'passthrough'
- name: Retrieve registry service IP
oc_service:
- namespace: default
+ namespace: "{{ openshift_hosted_registry_namespace }}"
name: docker-registry
state: list
register: docker_registry_service_ip
- changed_when: false
-- set_fact:
- docker_registry_route_hostname: "{{ 'docker-registry-default.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
-
-- name: Create registry certificates if they do not exist
- command: >
- {{ openshift.common.client_binary }} adm ca create-server-cert
- --signer-cert={{ openshift_master_config_dir }}/ca.crt
- --signer-key={{ openshift_master_config_dir }}/ca.key
- --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt
- --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
- --cert={{ openshift_master_config_dir }}/registry.crt
- --key={{ openshift_master_config_dir }}/registry.key
- when: False in (docker_registry_certificates_stat_result.results | default([]) | oo_collect(attribute='stat.exists') | list)
+- name: Create registry certificates
+ oc_adm_ca_server_cert:
+ signer_cert: "{{ openshift_master_config_dir }}/ca.crt"
+ signer_key: "{{ openshift_master_config_dir }}/ca.key"
+ signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
+ hostnames:
+ - "{{ docker_registry_service_ip.results.clusterip }}"
+ - docker-registry.default.svc.cluster.local
+ - "{{ docker_registry_route_hostname }}"
+ cert: "{{ openshift_master_config_dir }}/registry.crt"
+ key: "{{ openshift_master_config_dir }}/registry.key"
+ register: server_cert_out
- name: Create the secret for the registry certificates
oc_secret:
- kubeconfig: "{{ openshift_hosted_kubeconfig }}"
name: registry-certificates
- namespace: default
- state: present
+ namespace: "{{ openshift_hosted_registry_namespace }}"
files:
- name: registry.crt
path: "{{ openshift_master_config_dir }}/registry.crt"
- name: registry.key
path: "{{ openshift_master_config_dir }}/registry.key"
- register: create_registry_certificates_secret
- run_once: true
+ register: create_registry_certificates_secret_out
-- name: "Add the secret to the registry's pod service accounts"
+- name: Add the secret to the registry's pod service accounts
oc_serviceaccount_secret:
service_account: "{{ item }}"
secret: registry-certificates
- namespace: default
- kubeconfig: "{{ openshift_hosted_kubeconfig }}"
- state: present
+ namespace: "{{ openshift_hosted_registry_namespace }}"
with_items:
- registry
- default
-- name: Determine if registry-certificates secret volume attached
- command: >
- {{ openshift.common.client_binary }} get dc/docker-registry
- -o jsonpath='{.spec.template.spec.volumes[?(@.secret)].secret.secretName}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_volumes
- changed_when: false
- failed_when: "docker_registry_volumes.stdout != '' and 'secretName is not found' not in docker_registry_volumes.stdout and docker_registry_volumes.rc != 0"
-
-- name: Attach registry-certificates secret volume
- command: >
- {{ openshift.common.client_binary }} volume dc/docker-registry --add --type=secret
- --secret-name=registry-certificates
- -m /etc/secrets
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- when: "'registry-certificates' not in docker_registry_volumes.stdout"
-
-- name: Determine if registry environment variables must be set
- command: >
- {{ openshift.common.client_binary }} env dc/docker-registry
- --list
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_env
- changed_when: false
-
-- name: Configure certificates in registry deplomentConfig
- command: >
- {{ openshift.common.client_binary }} env dc/docker-registry
- REGISTRY_HTTP_TLS_CERTIFICATE=/etc/secrets/registry.crt
- REGISTRY_HTTP_TLS_KEY=/etc/secrets/registry.key
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- when: "'REGISTRY_HTTP_TLS_CERTIFICATE=/etc/secrets/registry.crt' not in docker_registry_env.stdout or 'REGISTRY_HTTP_TLS_KEY=/etc/secrets/registry.key' not in docker_registry_env.stdout"
-
-- name: Determine if registry liveness probe scheme is HTTPS
- command: >
- {{ openshift.common.client_binary }} get dc/docker-registry
- -o jsonpath='{.spec.template.spec.containers[*].livenessProbe.httpGet.scheme}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_liveness_probe
- changed_when: false
-
-# This command is on a single line to preserve patch json.
-- name: Update registry liveness probe from HTTP to HTTPS
- command: "{{ openshift.common.client_binary }} patch dc/docker-registry --api-version=v1 -p '{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"registry\",\"livenessProbe\":{\"httpGet\":{\"scheme\":\"HTTPS\"}}}]}}}}' --config={{ openshift_hosted_kubeconfig }} -n default"
- when: "'HTTPS' not in docker_registry_liveness_probe.stdout"
-
-- name: Determine if registry readiness probe scheme is HTTPS
- command: >
- {{ openshift.common.client_binary }} get dc/docker-registry
- -o jsonpath='{.spec.template.spec.containers[*].readinessProbe.httpGet.scheme}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_readiness_probe
- changed_when: false
+- name: Set facts for secure registry
+ set_fact:
+ registry_secure_volume_mounts:
+ - name: registry-certificates
+ path: /etc/secrets
+ type: secret
+ secret_name: registry-certificates
+ registry_secure_env_vars:
+ REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
+ REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
+ registry_secure_edits:
+ - key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
+ value: HTTPS
+ action: put
+ - key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
+ value: HTTPS
+ action: put
-# This command is on a single line to preserve patch json.
-- name: Update registry readiness probe from HTTP to HTTPS
- command: "{{ openshift.common.client_binary }} patch dc/docker-registry --api-version=v1 -p '{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"registry\",\"readinessProbe\":{\"httpGet\":{\"scheme\":\"HTTPS\"}}}]}}}}' --config={{ openshift_hosted_kubeconfig }} -n default"
- when: "'HTTPS' not in docker_registry_readiness_probe.stdout"
+- name: Update openshift_hosted facts with secure registry variables
+ set_fact:
+ openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(registry_secure_volume_mounts) }}"
+ openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine(registry_secure_env_vars) }}"
+ openshift_hosted_registry_edits: "{{ openshift_hosted_registry_edits | union(registry_secure_edits) }}"
+ openshift_hosted_registry_force: "{{ openshift_hosted_registry_force | union([server_cert_out.changed]) | union([create_registry_certificates_secret_out.changed]) }}"
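
The `edits` entries address fields by dotted paths with list indexes (spec.template.spec.containers[0]...), and `action: put` overwrites whatever sits at that path. As a toy illustration of the path semantics only, not the lib_openshift implementation:

    import re

    def put(obj, path, value):
        """Toy dotted-path setter: 'a.b[0].c' -> obj['a']['b'][0]['c'] = value."""
        tokens = re.findall(r"([^.\[\]]+)|\[(\d+)\]", path)
        keys = [int(idx) if idx else key for key, idx in tokens]
        for k in keys[:-1]:
            obj = obj[k]
        obj[keys[-1]] = value

    dc = {"spec": {"template": {"spec": {"containers": [
        {"name": "registry", "livenessProbe": {"httpGet": {"scheme": "HTTP"}}}]}}}}
    put(dc, "spec.template.spec.containers[0].livenessProbe.httpGet.scheme", "HTTPS")
    print(dc["spec"]["template"]["spec"]["containers"][0]
          ["livenessProbe"]["httpGet"]["scheme"])  # HTTPS
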
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
index 15128784e..3dde83bee 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
@@ -1,105 +1,52 @@
---
-- fail:
+- name: Assert supported openshift.hosted.registry.storage.provider
+ assert:
+ that:
+ - openshift.hosted.registry.storage.provider in ['azure_blob', 's3', 'swift']
msg: >
- Object Storage Provider: {{ openshift.hosted.registry.storage.provider }}
+ Object Storage Provider: "{{ openshift.hosted.registry.storage.provider }}"
is not currently supported
- when: openshift.hosted.registry.storage.provider not in ['azure_blob', 's3', 'swift']
-- fail:
+- name: Assert implemented openshift.hosted.registry.storage.provider
+ assert:
+ that:
+ - openshift.hosted.registry.storage.provider not in ['azure_blob', 'swift']
msg: >
Support for provider: "{{ openshift.hosted.registry.storage.provider }}"
not implemented yet
- when: openshift.hosted.registry.storage.provider in ['azure_blob', 'swift']
- include: s3.yml
when: openshift.hosted.registry.storage.provider == 's3'
-- name: Test if docker registry config secret exists
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- get secrets {{ registry_config_secret_name }} -o json
- register: secrets
- changed_when: false
- failed_when: false
-
-- set_fact:
- registry_config: "{{ lookup('template', 'registry_config.j2') | b64encode }}"
-
-- set_fact:
- registry_config_secret: "{{ lookup('template', 'registry_config_secret.j2') | from_yaml }}"
-
-- set_fact:
- same_storage_provider: "{{ (secrets.stdout|from_json)['metadata']['annotations']['provider'] | default(none) == openshift.hosted.registry.storage.provider }}"
- when: secrets.rc == 0
-
-- name: Update registry config secret
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- patch secret/{{ registry_config_secret_name }}
- -p '{"data": {"config.yml": "{{ registry_config }}"}}'
- register: update_config_secret
- when: secrets.rc == 0 and (secrets.stdout|from_json)['data']['config.yml'] != registry_config and same_storage_provider | bool
-
-- name: Create registry config secret
- shell: >
- echo '{{ registry_config_secret |to_json }}' |
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- create -f -
- when: secrets.rc == 1
+- name: Ensure the registry secret exists
+ oc_secret:
+ name: "{{ registry_config_secret_name }}"
+ state: present
+ contents:
+ - path: /tmp/config.yml
+ data: "{{ lookup('template', 'registry_config.j2') }}"
+ register: registry_config_out
- name: Add secrets to registry service account
oc_serviceaccount_secret:
service_account: registry
secret: "{{ registry_config_secret_name }}"
- namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
- kubeconfig: "{{ openshift_hosted_kubeconfig }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
state: present
-
-- name: Determine if deployment config contains secrets
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- set volumes dc/docker-registry --list
- register: volume
- changed_when: false
-
-- name: Add secrets to registry deployment config
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- set volumes dc/docker-registry --add --name=docker-config -m /etc/registry
- --type=secret --secret-name={{ registry_config_secret_name }}
- when: registry_config_secret_name not in volume.stdout
-
-- name: Determine if registry environment variable needs to be created
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- set env --list dc/docker-registry
- register: oc_env
- changed_when: false
-
-- name: Add registry environment variable
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- set env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registry/config.yml
- when: "'REGISTRY_CONFIGURATION_PATH' not in oc_env.stdout"
-
-- name: Redeploy registry
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- deploy dc/docker-registry --latest
- when: secrets.rc == 0 and not update_config_secret | skipped and update_config_secret.rc == 0 and same_storage_provider | bool
+ register: svcac
+
+- name: Set facts for registry object storage
+ set_fact:
+ registry_obj_storage_volume_mounts:
+ - name: docker-config
+ path: /etc/registry
+ type: secret
+ secret_name: "{{ registry_config_secret_name }}"
+ registry_obj_storage_env_vars:
+ REGISTRY_CONFIGURATION_PATH: /etc/registry/config.yml
+
+- name: Update openshift_hosted registry facts for storage
+ set_fact:
+ openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(registry_obj_storage_volume_mounts) }}"
+ openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine(registry_obj_storage_env_vars) }}"
+ openshift_hosted_registry_force: "{{ openshift_hosted_registry_force | union([registry_config_out.changed]) | union([svcac.changed]) }}"
diff --git a/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml b/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml
deleted file mode 100644
index 0172f5ca0..000000000
--- a/roles/openshift_hosted/tasks/registry/storage/persistent_volume.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- set_fact:
- registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
-
-- name: Determine if volume is already attached to dc/docker-registry
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- get -o template dc/docker-registry --template=\\{\\{.spec.template.spec.volumes\\}\\} --output-version=v1
- changed_when: false
- failed_when: false
- register: registry_volumes_output
-
-- set_fact:
- volume_attached: "{{ registry_volume_claim in (registry_volumes_output).stdout | default(['']) }}"
-
-- name: Add volume to dc/docker-registry
- command: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_hosted_kubeconfig }}
- --namespace={{ openshift.hosted.registry.namespace | default('default') }}
- volume dc/docker-registry
- --add --overwrite -t persistentVolumeClaim --claim-name={{ registry_volume_claim }}
- --name=registry-storage
- when: not volume_attached | bool
diff --git a/roles/openshift_hosted/tasks/registry/storage/s3.yml b/roles/openshift_hosted/tasks/registry/storage/s3.yml
index f73d9f0ae..26f921f15 100644
--- a/roles/openshift_hosted/tasks/registry/storage/s3.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/s3.yml
@@ -1,33 +1,49 @@
---
-- fail:
- msg: >
- openshift_hosted_registry_storage_s3_accesskey and
- openshift_hosted_registry_storage_s3_secretkey are required
- when: openshift.hosted.registry.storage.s3.accesskey | default(none) is none or openshift.hosted.registry.storage.s3.secretkey | default(none) is none
-
-- fail:
- msg: >
- openshift_hosted_registry_storage_s3_bucket and
- openshift_hosted_registry_storage_s3_region are required
- when: openshift.hosted.registry.storage.s3.bucket | default(none) is none or openshift.hosted.registry.storage.s3.region | default(none) is none
+- name: Assert that S3 variables are provided for registry_config template
+ assert:
+ that:
+ - openshift.hosted.registry.storage.s3.accesskey | default(none) is not none
+ - openshift.hosted.registry.storage.s3.secretkey | default(none) is not none
+ - openshift.hosted.registry.storage.s3.bucket | default(none) is not none
+ - openshift.hosted.registry.storage.s3.region | default(none) is not none
+ msg: |
+ When using S3 storage, the following variables are required:
+ openshift_hosted_registry_storage_s3_accesskey
+ openshift_hosted_registry_storage_s3_secretkey
+ openshift_hosted_registry_storage_s3_bucket
+ openshift_hosted_registry_storage_s3_region
-# If cloudfront is being used, fail if we don't have all the required variables
-- assert:
+- name: If cloudfront is being used, assert that we have all the required variables
+ assert:
that:
- - "openshift_hosted_registry_storage_s3_cloudfront_baseurl is not defined or openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile | default(none) is not none"
- - "openshift_hosted_registry_storage_s3_cloudfront_baseurl is not defined or openshift_hosted_registry_storage_s3_cloudfront_keypairid | default(none) is not none"
- msg: >
+ - "openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile | default(none) is not none"
+ - "openshift_hosted_registry_storage_s3_cloudfront_keypairid | default(none) is not none"
+ msg: |
When openshift_hosted_registry_storage_s3_cloudfront_baseurl is provided
- openshift_hosted_registry_storage_s3_cloudfront_keypairid and
- openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile are required
+ openshift_hosted_registry_storage_s3_cloudfront_keypairid and
+ openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile are required
+ when: openshift_hosted_registry_storage_s3_cloudfront_baseurl is defined
+
+# Inject the cloudfront private key as a secret when required
+- block:
+
+ - name: Create registry secret for cloudfront
+ oc_secret:
+ state: present
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: docker-registry-s3-cloudfront
+ contents:
+ - path: cloudfront.pem
+ data: "{{ lookup('file', openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile) }}"
+
+ - name: Append cloudfront secret registry volume to openshift_hosted_registry_volumes
+ set_fact:
+ openshift_hosted_registry_volumes: "{{ openshift_hosted_registry_volumes | union(s3_volume_mount) }}"
+ vars:
+ s3_volume_mount:
+ - name: cloudfront-vol
+ path: /etc/origin
+ type: secret
+ secret_name: docker-registry-s3-cloudfront
-# Copy the cloudfront.pem to the host if the baseurl is given
-- name: Copy cloudfront.pem to the registry
- copy:
- src: "{{ openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile }}"
- dest: /etc/s3-cloudfront/cloudfront.pem
- backup: true
- owner: root
- group: root
- mode: 0600
when: openshift_hosted_registry_storage_s3_cloudfront_baseurl | default(none) is not none
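
The replacement tasks above make the CloudFront key available to the registry pod instead of copying it onto the host: the PEM file is stored in a docker-registry-s3-cloudfront secret and mounted at /etc/origin through a secret volume. The set_fact task uses Ansible's union filter, which keeps reruns idempotent because an already-present volume entry is not appended a second time. A rough Python analogue of that filter, assuming entries compare equal as plain dicts:

    def union(existing, extra):
        # Approximates Jinja2's | union filter: items from `extra` that are
        # not already in `existing` are appended once.
        result = list(existing)
        for item in extra:
            if item not in result:
                result.append(item)
        return result

    s3_volume_mount = [{
        "name": "cloudfront-vol",
        "path": "/etc/origin",
        "type": "secret",
        "secret_name": "docker-registry-s3-cloudfront",
    }]
    volumes = union([], s3_volume_mount)
    volumes = union(volumes, s3_volume_mount)  # a rerun adds nothing new
    assert volumes == s3_volume_mount
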
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index b944fa522..607ace7f9 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -1,80 +1,91 @@
---
-- fail:
- msg: "'certfile', 'keyfile' and 'cafile' keys must be specified when supplying the openshift_hosted_router_certificate variable."
- when: openshift_hosted_router_certificate is defined and ('certfile' not in openshift_hosted_router_certificate or 'keyfile' not in openshift_hosted_router_certificate or 'cafile' not in openshift_hosted_router_certificate)
+- name: Retrieve list of openshift nodes matching router selector
+ oc_obj:
+ state: list
+ kind: node
+ namespace: "{{ openshift.hosted.router.namespace | default('default') }}"
+ selector: "{{ openshift.hosted.router.selector | default(omit) }}"
+ register: router_nodes
+ when: openshift.hosted.router.replicas | default(none) is none
-- name: Read router certificate and key
- become: no
- local_action:
- module: slurp
- src: "{{ item }}"
- register: openshift_router_certificate_output
- # Defaulting dictionary keys to none to avoid deprecation warnings
- # (future fatal errors) during template evaluation. Dictionary keys
- # won't be accessed unless openshift_hosted_router_certificate is
- # defined and has all keys (certfile, keyfile, cafile) which we
- # check above.
- with_items:
- - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}"
- - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}"
- - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}"
- when: openshift_hosted_router_certificate is defined
+- name: set_fact replicas
+ set_fact:
+ replicas: "{{ openshift.hosted.router.replicas|default(None) | get_router_replicas(router_nodes) }}"
-- name: Persist certificate contents
- openshift_facts:
- role: hosted
- openshift_env:
- openshift_hosted_router_certificate_contents: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}"
- when: openshift_hosted_router_certificate is defined
+- block:
-- name: Create PEM certificate
- copy:
- content: "{{ openshift.hosted.router.certificate.contents }}"
- dest: "{{ openshift_master_config_dir }}/openshift-router.pem"
- mode: 0600
- when: "'certificate' in openshift.hosted.router and 'contents' in openshift.hosted.router.certificate"
+ - name: Assert that 'certfile', 'keyfile' and 'cafile' keys are provided in openshift_hosted_router_certificate
+ assert:
+ that:
+ - "'certfile' in openshift_hosted_router_certificate"
+ - "'keyfile' in openshift_hosted_router_certificate"
+ - "'cafile' in openshift_hosted_router_certificate"
+ msg: "'certfile', 'keyfile' and 'cafile' keys must be specified when supplying the openshift_hosted_router_certificate variable."
-- name: Retrieve list of openshift nodes matching router selector
- command: >
- {{ openshift.common.client_binary }} --api-version='v1' -o json
- get nodes -n default --config={{ openshift_hosted_kubeconfig }}
- --selector={{ openshift.hosted.router.selector | default('') }}
- register: router_nodes_json
- changed_when: false
- when: openshift.hosted.router.replicas | default(none) is none
+ - name: Read router certificate and key
+ become: no
+ local_action:
+ module: slurp
+ src: "{{ item }}"
+ register: openshift_router_certificate_output
+ # Defaulting dictionary keys to none to avoid deprecation warnings
+ # (future fatal errors) during template evaluation. Dictionary keys
+ # won't be accessed unless openshift_hosted_router_certificate is
+ # defined and has all keys (certfile, keyfile, cafile) which we
+ # check above.
+ with_items:
+ - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}"
+
+ - name: Persist certificate contents
+ openshift_facts:
+ role: hosted
+ openshift_env:
+ openshift_hosted_router_certificate_contents: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}"
-- set_fact:
- replicas: "{{ openshift.hosted.router.replicas | default((router_nodes_json.stdout | default('{\"items\":[]}') | from_json)['items'] | length) }}"
+ - name: Create PEM certificate
+ copy:
+ content: "{{ openshift.hosted.router.certificate.contents }}"
+ dest: "{{ openshift_master_config_dir }}/openshift-router.pem"
+ mode: 0600
+
+ when: openshift_hosted_router_certificate is defined
- name: Create OpenShift router
- command: >
- {{ openshift.common.client_binary }} adm router --create
- --config={{ openshift_hosted_kubeconfig }}
- {% if replicas > 1 -%}
- --replicas={{ replicas }}
- {% endif -%}
- {% if 'certificate' in openshift.hosted.router and 'contents' in openshift.hosted.router.certificate -%}
- --default-cert={{ openshift_master_config_dir }}/openshift-router.pem
- {% endif -%}
- --namespace={{ openshift.hosted.router.namespace | default('default') }}
- {% if openshift.hosted.router.force_subdomain | default(none) is not none %}
- --force-subdomain={{ openshift.hosted.router.force_subdomain }}
- {% endif %}
- --service-account=router
- {% if openshift.hosted.router.selector | default(none) is not none -%}
- --selector='{{ openshift.hosted.router.selector }}'
- {% endif -%}
- {% if not openshift.common.version_gte_3_2_or_1_2 | bool -%}
- --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig
- {% endif -%}
- {% if openshift.hosted.router.registryurl | default(none) is not none -%}
- --images='{{ openshift.hosted.router.registryurl }}'
- {% endif -%}
- {% if openshift.hosted.router.name | default(none) is not none -%}
- {{ openshift.hosted.router.name }}
- {% endif -%}
+ oc_adm_router:
+ name: "{{ openshift.hosted.router.name | default('router') }}"
+ replicas: "{{ replicas }}"
+ namespace: "{{ openshift.hosted.router.namespace | default('default') }}"
+ # This option is not yet implemented
+ # force_subdomain: "{{ openshift.hosted.router.force_subdomain | default(none) }}"
+ service_account: router
+ selector: "{{ openshift.hosted.router.selector | default(none) }}"
+ images: "{{ openshift.hosted.router.registryurl | default(none) }}"
+ default_cert: "{{ openshift_hosted_router_certificate is defined | default(false) | ternary(openshift_master_config_dir + '/openshift-router.pem', omit) }}"
+ # These edits are being specified only to prevent 'changed' on rerun
+ edits:
+ - key: spec.strategy.rollingParams.intervalSeconds
+ value: 1
+ action: put
+ - key: spec.strategy.rollingParams.updatePeriodSeconds
+ value: 1
+ action: put
+ - key: spec.strategy.activeDeadlineSeconds
+ value: 21600
+ action: put
+ register: routerout
+
+# This should probably move to the module
+- name: wait for deploy
+ pause:
+ seconds: 30
+ when: routerout.changed
- register: openshift_hosted_router_results
- changed_when: "'service exists' not in openshift_hosted_router_results.stdout"
- failed_when: "openshift_hosted_router_results.rc != 0 and 'service exists' not in openshift_hosted_router_results.stdout and 'deployment_config' not in openshift_hosted_router_results.stderr and 'service' not in openshift_hosted_router_results.stderr"
+- name: Ensure router replica count matches desired
+ oc_scale:
+ kind: dc
+ name: "{{ openshift.hosted.router.name | default('router') }}"
+ namespace: "{{ openshift.hosted.router.namespace | default('default') }}"
+ replicas: "{{ replicas }}"
when: replicas | int > 0
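
The replica count now flows through the get_router_replicas filter: an explicitly configured value wins, otherwise the count defaults to the number of nodes matching the router selector (fetched by the oc_obj task at the top of the file). A sketch of that contract in Python; the exact shape of the registered oc_obj result is an assumption here:

    def get_router_replicas(replicas, router_nodes):
        # Explicit setting wins; otherwise one replica per matching node.
        if replicas not in (None, ""):
            return int(replicas)
        node_items = router_nodes["results"]["results"][0]["items"]
        return len(node_items)

    nodes = {"results": {"results": [{"items": [{"name": "infra-1"}, {"name": "infra-2"}]}]}}
    assert get_router_replicas(None, nodes) == 2
    assert get_router_replicas("3", nodes) == 3
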
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index 557fd03af..f3336334a 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -78,7 +78,7 @@ middleware:
- name: cloudfront
options:
baseurl: {{ openshift_hosted_registry_storage_s3_cloudfront_baseurl }}
- privatekey: {{ openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile }}
+ privatekey: /etc/origin/cloudfront.pem
keypairid: {{ openshift_hosted_registry_storage_s3_cloudfront_keypairid }}
{% elif openshift.common.version_gte_3_3_or_1_3 | bool %}
storage:
diff --git a/roles/openshift_hosted_logging/handlers/main.yml b/roles/openshift_hosted_logging/handlers/main.yml
index ad79e62ae..ffb812271 100644
--- a/roles/openshift_hosted_logging/handlers/main.yml
+++ b/roles/openshift_hosted_logging/handlers/main.yml
@@ -3,3 +3,24 @@
systemd: name={{ openshift.common.service_type }}-master state=restarted
when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
notify: Verify API Server
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
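
The handler polls /healthz/ready up to 120 times, one second apart, until the response body is exactly 'ok'. A minimal Python equivalent of that loop (standard library only; the URL and CA path are placeholders):

    import ssl
    import time
    import urllib.request

    def wait_for_api(api_url, cacert, retries=120, delay=1):
        # Mirrors the handler's until/retries/delay loop.
        ctx = ssl.create_default_context(cafile=cacert)
        for _ in range(retries):
            try:
                with urllib.request.urlopen(api_url + "/healthz/ready", context=ctx) as resp:
                    if resp.read().decode().strip() == "ok":
                        return True
            except OSError:
                pass
            time.sleep(delay)
        return False
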
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index f7b2f7743..14b80304d 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -25,7 +25,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_image_version`: The image version for the logging images to use. Defaults to 'latest'.
- `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. Defaults to 'False'.
- `openshift_logging_master_url`: The URL for the Kubernetes master, this does not need to be public facing but should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.{{openshift.common.dns_domain}}'.
-- `openshift_logging_master_public_url`: The public facing URL for the Kubernetes master, this is used for Authentication redirection. Defaults to 'https://{{openshift.common.public_hostname}}:8443'.
+- `openshift_logging_master_public_url`: The public facing URL for the Kubernetes master, this is used for Authentication redirection. Defaults to 'https://{{openshift.common.public_hostname}}:{{openshift.master.api_port}}'.
- `openshift_logging_namespace`: The namespace that Aggregated Logging will be installed in. Defaults to 'logging'.
- `openshift_logging_curator_default_days`: The default minimum age (in days) Curator uses for deleting log records. Defaults to '30'.
- `openshift_logging_curator_run_hour`: The hour of the day that Curator will run at. Defaults to '0'.
@@ -46,6 +46,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
- `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
+- `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy for the Kibana route. Defaults to 'Redirect'.
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
@@ -63,7 +64,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_es_cluster_size`: The number of ES cluster members. Defaults to '1'.
- `openshift_logging_es_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set
-- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '1024Mi'.
+- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '8Gi'.
- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'.
- `openshift_logging_es_pvc_dynamic`: Whether or not to add the dynamic PVC annotation for any generated PVCs. Defaults to 'False'.
- `openshift_logging_es_pvc_size`: The requested size for the ES PVCs, when not provided the role will not generate any PVCs. Defaults to '""'.
@@ -81,7 +82,7 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_client_key`: /etc/fluent/keys/key
- `openshift_logging_es_ops_cluster_size`: 1
- `openshift_logging_es_ops_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set
-- `openshift_logging_es_ops_memory_limit`: 1024Mi
+- `openshift_logging_es_ops_memory_limit`: 8Gi
- `openshift_logging_es_ops_pvc_dynamic`: False
- `openshift_logging_es_ops_pvc_size`: ""
- `openshift_logging_es_ops_pvc_prefix`: logging-es-ops
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index d9eebe688..5440a3647 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -3,7 +3,7 @@ openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | d
openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
openshift_logging_use_ops: False
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
-openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://{{openshift.common.public_hostname}}:8443') }}"
+openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' + openshift.master.api_port) }}"
openshift_logging_namespace: logging
openshift_logging_install_logging: True
@@ -19,13 +19,14 @@ openshift_logging_curator_memory_limit: null
openshift_logging_curator_ops_cpu_limit: 100m
openshift_logging_curator_ops_memory_limit: null
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.{{openshift.common.dns_domain}}') }}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' + openshift.common.dns_domain) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: null
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
openshift_logging_kibana_proxy_memory_limit: null
openshift_logging_kibana_replica_count: 1
+openshift_logging_kibana_edge_term_policy: Redirect
#The absolute path on the control node to the cert file to use
#for the public facing kibana certs
@@ -39,7 +40,7 @@ openshift_logging_kibana_key: ""
#for the public facing kibana certs
openshift_logging_kibana_ca: ""
-openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.{{openshift.common.dns_domain}}') }}"
+openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' + openshift.common.dns_domain) }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: null
openshift_logging_kibana_ops_proxy_debug: false
@@ -62,7 +63,7 @@ openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
openshift_logging_es_cpu_limit: null
-openshift_logging_es_memory_limit: 1024Mi
+openshift_logging_es_memory_limit: 8Gi
openshift_logging_es_pv_selector: null
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
@@ -80,7 +81,7 @@ openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: null
-openshift_logging_es_ops_memory_limit: 1024Mi
+openshift_logging_es_ops_memory_limit: 8Gi
openshift_logging_es_ops_pv_selector: None
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
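
The default expressions above drop the nested moustaches ('...{{openshift.common.dns_domain}}...') in favor of string concatenation. In plain Jinja2, a template expression inside a string literal is not evaluated again; Ansible's recursive templating often papers over this, but the concatenated form is unambiguous:

    from jinja2 import Environment

    env = Environment()
    broken = env.from_string("{{ host | default('kibana.{{ dns_domain }}') }}")
    fixed = env.from_string("{{ host | default('kibana.' + dns_domain) }}")
    # With `host` undefined, the default kicks in:
    print(broken.render(dns_domain="example.com"))  # kibana.{{ dns_domain }}
    print(fixed.render(dns_domain="example.com"))   # kibana.example.com
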
diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging/files/fluent.conf
index c0c1c8a44..aeaa705ee 100644
--- a/roles/openshift_logging/files/fluent.conf
+++ b/roles/openshift_logging/files/fluent.conf
@@ -22,7 +22,7 @@
@include configs.d/openshift/filter-k8s-flatten-hash.conf
@include configs.d/openshift/filter-k8s-record-transform.conf
@include configs.d/openshift/filter-syslog-record-transform.conf
- @include configs.d/openshift/filter-common-data-model.conf
+ @include configs.d/openshift/filter-viaq-data-model.conf
@include configs.d/openshift/filter-post-*.conf
##
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index ad79e62ae..ffb812271 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -3,3 +3,24 @@
systemd: name={{ openshift.common.service_type }}-master state=restarted
when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
notify: Verify API Server
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
index 3c462378b..7af17a708 100644
--- a/roles/openshift_logging/tasks/generate_routes.yaml
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -26,6 +26,7 @@
tls_cert: "{{kibana_cert | default('') | b64decode}}"
tls_ca_cert: "{{kibana_ca | b64decode}}"
tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}"
labels:
component: support
logging-infra: support
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
index 1829acaee..81fac8b5e 100644
--- a/roles/openshift_logging/tasks/generate_secrets.yaml
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -17,7 +17,7 @@
- name: Generating secrets for logging components
template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
vars:
- secret_name: logging-{{component}}
+ secret_name: "logging-{{component}}"
secret_key_file: "{{component}}_key"
secret_cert_file: "{{component}}_cert"
secrets:
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index f9c2c81fb..244949505 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -5,6 +5,7 @@
- name: Generate PersistentVolumeClaims
include: "{{ role_path}}/tasks/generate_pvcs.yaml"
vars:
+ es_pvc_pool: []
es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
@@ -63,6 +64,7 @@
- name: Generate PersistentVolumeClaims for Ops
include: "{{ role_path}}/tasks/generate_pvcs.yaml"
vars:
+ es_pvc_pool: []
es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
openshift_logging_es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
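
Initializing es_pvc_pool to an empty list before each include matters because the included generate_pvcs.yaml appends to that pool; without the reset, the ops run would inherit the claims accumulated for the regular cluster. The same pitfall in miniature, in Python (generate_pvcs here is a stand-in for the included task file):

    def generate_pvcs(es_pvc_pool, names):
        # Stand-in for the included task file: it appends to the pool it is given.
        es_pvc_pool.extend(names)
        return es_pvc_pool

    pool = generate_pvcs([], ["logging-es-1"])
    generate_pvcs(pool, ["logging-es-ops-1"])       # shared state without a reset
    assert pool == ["logging-es-1", "logging-es-ops-1"]

    pool = generate_pvcs([], ["logging-es-ops-1"])  # the es_pvc_pool: [] lines play this role
    assert pool == ["logging-es-ops-1"]
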
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index 3e97487dc..edbb62c3e 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -16,7 +16,7 @@
name: "{{ fluentd_host }}"
kind: node
state: add
- label: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
+ labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ openshift_logging_fluentd_hosts }}"
loop_control:
loop_var: fluentd_host
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index bae6aebbb..4b3722e29 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -16,7 +16,7 @@
name: "{{ fluentd_host }}"
kind: node
state: absent
- label: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
+ labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ openshift_logging_fluentd_hosts }}"
loop_control:
loop_var: fluentd_host
diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml
index af303c47c..cef835668 100644
--- a/roles/openshift_logging/tasks/update_master_config.yaml
+++ b/roles/openshift_logging/tasks/update_master_config.yaml
@@ -5,3 +5,5 @@
yaml_key: assetConfig.loggingPublicURL
yaml_value: "https://{{ openshift_logging_kibana_hostname }}"
notify: restart master
+ tags:
+ - update_master_config
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
index 0421cdf58..30fdbd2af 100644
--- a/roles/openshift_logging/tasks/upgrade_logging.yaml
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -26,14 +26,14 @@
loop_control:
loop_var: object
-- name: Wait for pods to stop
+- name: Wait for pods to start
oc_obj:
state: list
- kind: dc
+ kind: pods
selector: "component=es"
namespace: "{{openshift_logging_namespace}}"
register: running_pod
- until: running_pod.results.results.items[?(@.status.phase == "Running")].metadata.name != ''
+ until: running_pod.results.results[0]['items'] | selectattr('status.phase', 'match', '^Running$') | map(attribute='metadata.name') | list | length != 0
retries: 30
delay: 10
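
The rewritten until: condition replaces a JSONPath-style expression (which Jinja2 does not evaluate) with a selectattr/map chain. What that chain computes, in plain Python, assuming the oc_obj result shape used in the condition:

    pods = {"results": {"results": [{"items": [
        {"metadata": {"name": "logging-es-1"}, "status": {"phase": "Running"}},
        {"metadata": {"name": "logging-es-2"}, "status": {"phase": "Pending"}},
    ]}]}}

    running = [
        item["metadata"]["name"]
        for item in pods["results"]["results"][0]["items"]
        if item["status"]["phase"] == "Running"
    ]
    assert running == ["logging-es-1"]  # the task retries until this list is non-empty
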
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
index b7bc15b62..a0fefd882 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging/templates/curator.j2
@@ -31,7 +31,7 @@ spec:
{% if curator_node_selector is iterable and curator_node_selector | length > 0 %}
nodeSelector:
{% for key, value in curator_node_selector.iteritems() %}
- {{key}}: {{value}}
+ {{key}}: "{{value}}"
{% endfor %}
{% endif %}
containers:
@@ -87,7 +87,7 @@ spec:
mountPath: /etc/curator/keys
readOnly: true
- name: config
- mountPath: /usr/curator/settings
+ mountPath: /etc/curator/settings
readOnly: true
- name: elasticsearch-storage
mountPath: /elasticsearch/persistent
diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging/templates/elasticsearch.yml.j2
index dad78b844..f2d098f10 100644
--- a/roles/openshift_logging/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging/templates/elasticsearch.yml.j2
@@ -8,7 +8,7 @@ script:
index:
number_of_shards: 1
number_of_replicas: 0
- auto_expand_replicas: 0-3
+ auto_expand_replicas: 0-2
unassigned.node_left.delayed_timeout: 2m
translog:
flush_threshold_size: 256mb
@@ -38,6 +38,11 @@ gateway:
io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"]
+openshift.config:
+ use_common_data_model: true
+ project_index_prefix: "project"
+ time_field_name: "@timestamp"
+
openshift.searchguard:
keystore.path: /etc/elasticsearch/secret/admin.jks
truststore.path: /etc/elasticsearch/secret/searchguard.truststore
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
index ec84c6b76..81ae070be 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging/templates/es.j2
@@ -33,7 +33,7 @@ spec:
{% if es_node_selector is iterable and es_node_selector | length > 0 %}
nodeSelector:
{% for key, value in es_node_selector.iteritems() %}
- {{key}}: {{value}}
+ {{key}}: "{{value}}"
{% endfor %}
{% endif %}
containers:
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2
index be9b45ab4..e6ecf82ff 100644
--- a/roles/openshift_logging/templates/kibana.j2
+++ b/roles/openshift_logging/templates/kibana.j2
@@ -30,7 +30,7 @@ spec:
{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %}
nodeSelector:
{% for key, value in kibana_node_selector.iteritems() %}
- {{key}}: {{value}}
+ {{key}}: "{{value}}"
{% endfor %}
{% endif %}
containers:
diff --git a/roles/openshift_logging/templates/route_reencrypt.j2 b/roles/openshift_logging/templates/route_reencrypt.j2
index 341ffdd84..cf8a9e65f 100644
--- a/roles/openshift_logging/templates/route_reencrypt.j2
+++ b/roles/openshift_logging/templates/route_reencrypt.j2
@@ -28,6 +28,9 @@ spec:
{{ line }}
{% endfor %}
termination: reencrypt
+{% if edge_term_policy is defined and edge_term_policy | length > 0 %}
+ insecureEdgeTerminationPolicy: {{ edge_term_policy }}
+{% endif %}
to:
kind: Service
name: {{ service_name }}
diff --git a/roles/openshift_logging/templates/secret.j2 b/roles/openshift_logging/templates/secret.j2
index d73bae9c4..eba4197da 100644
--- a/roles/openshift_logging/templates/secret.j2
+++ b/roles/openshift_logging/templates/secret.j2
@@ -1,9 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
- name: {{secret_name}}
+ name: "{{secret_name}}"
type: Opaque
data:
{% for s in secrets %}
- {{s.key}}: {{s.value | b64encode}}
+ "{{s.key}}" : "{{s.value | b64encode}}"
{% endfor %}
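
Quoting the secret name and the key/value pairs protects the generated YAML from keys or values that would otherwise be misparsed. The values themselves are base64-encoded, as Kubernetes requires for Secret data; what the template's b64encode filter produces, in Python:

    import base64

    def b64encode(value):
        # The Jinja2 b64encode filter's output for a Secret data value.
        return base64.b64encode(value.encode("utf-8")).decode("ascii")

    print('"kibana_key" : "%s"' % b64encode("-----BEGIN PRIVATE KEY-----"))
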
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index a0e1ac75e..1b3e0dba1 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,4 +1,8 @@
---
+- name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
- name: Pre-pull master system container image
command: >
atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
@@ -10,68 +14,31 @@
atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master
register: result
-- name: Update Master system container package
- command: >
- atomic containers update {{ openshift.common.service_type }}-master
- register: update_result
- changed_when: "'Extracting' in update_result.stdout"
- when:
- - ("master" in result.stdout)
- - l_is_same_version
- - not l_is_ha
-
-- name: Uninstall Master system container package
- command: >
- atomic uninstall {{ openshift.common.service_type }}-master
- failed_when: False
- when:
- - ("master" in result.stdout)
- - not l_is_same_version
- - not l_is_ha
-
-- name: Install Master system container package
- command: >
- atomic install --system --name={{ openshift.common.service_type }}-master {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
+- name: Install or Update master system container
+ oc_atomic_container:
+ name: "{{ openshift.common.service_type }}-master"
+ image: "{{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ state: latest
when:
- - not l_is_same_version or ("master" not in result.stdout) | bool
- not l_is_ha
- notify:
- - restart master
# HA
-- name: Update Master HA system container package
- command: >
- atomic containers update {{ openshift.common.service_type }}-master-{{ item }}
- register: update_result
- changed_when: "'Extracting' in update_result.stdout"
- with_items:
- - api
- - controllers
- when:
- - ("master" in result.stdout)
- - l_is_same_version
- - l_is_ha
-
-- name: Uninstall Master HA system container package
- command: >
- atomic uninstall {{ openshift.common.service_type }}-master-{{ item }}
- failed_when: False
- with_items:
- - api
- - controllers
+- name: Install or Update HA api master system container
+ oc_atomic_container:
+ name: "{{ openshift.common.service_type }}-master-api"
+ image: "{{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ state: latest
+ values:
+ - COMMAND=api
when:
- - ("master" in result.stdout)
- - not l_is_same_version
- l_is_ha
-- name: Install Master HA system container package
- command: >
- atomic install --system --set COMMAND={{ item }} --name={{ openshift.common.service_type }}-master-{{ item }} {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
- with_items:
- - api
- - controllers
+- name: Install or Update HA controller master system container
+ oc_atomic_container:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ image: "{{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ state: latest
+ values:
+ - COMMAND=controllers
when:
- - not l_is_same_version or ("master" not in result.stdout) | bool
- l_is_ha
- notify:
- - restart master
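
The update/uninstall/install triplet collapses into oc_atomic_container with state: latest, which decides for itself whether to install or update. A rough sketch of that decision in Python; the module's real implementation also inspects the image and reports changed/unchanged state:

    import subprocess

    def ensure_latest(name, image):
        # Sketch only: install the system container if absent, update it otherwise.
        listed = subprocess.run(
            ["atomic", "containers", "list", "--no-trunc", "-a", "-f", "container=" + name],
            capture_output=True, text=True, check=True)
        if name in listed.stdout:
            subprocess.run(["atomic", "containers", "update", name], check=True)
        else:
            subprocess.run(
                ["atomic", "install", "--system", "--name=" + name, image], check=True)
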
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index 1e157097d..c5ba20409 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -20,4 +20,3 @@ openshift_master_valid_grant_methods:
- deny
l_is_ha: "{{ openshift.master.ha is defined and openshift.master.ha | bool }}"
-l_is_same_version: "{{ (openshift.common.version is defined) and (openshift.common.version == openshift_version) | bool }}"
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 7a5ed51ec..61541acb8 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -159,7 +159,7 @@
become: no
- name: Lookup default group for ansible_ssh_user
- command: "/usr/bin/id -g {{ ansible_ssh_user }}"
+ command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}"
changed_when: false
register: _ansible_ssh_user_gid
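
Piping ansible_ssh_user through the quote filter (Ansible's wrapper around Python's shlex quoting) keeps the value a single, safe argument: under a shell the unquoted form would execute a second command, and even with the command module it would split into extra arguments. The difference it makes:

    import shlex

    user = "alice; id root"                        # hypothetical hostile value
    print("/usr/bin/id -g " + user)                # a shell would see a second command
    print("/usr/bin/id -g " + shlex.quote(user))   # stays one argument: 'alice; id root'
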
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index 6d009077a..db24028cd 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -527,7 +527,7 @@ class FilterModule(object):
'master.kubelet-client.crt',
'master.kubelet-client.key']
if bool(include_ca):
- certs += ['ca.crt', 'ca.key']
+ certs += ['ca.crt', 'ca.key', 'ca-bundle.crt']
if bool(include_keys):
certs += ['serviceaccounts.private.key',
'serviceaccounts.public.key']
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index ef322bd7d..7f7bc4316 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -40,7 +40,7 @@ class LookupModule(LookupBase):
# pylint: disable=line-too-long
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
@@ -48,17 +48,17 @@ class LookupModule(LookupBase):
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
- if deployment_type == 'openshift-enterprise':
- # convert short_version to origin short_version
- short_version = re.sub('^3.', '1.', short_version)
+ if deployment_type == 'origin':
+ # convert short_version to enterprise short_version
+ short_version = re.sub('^1.', '3.', short_version)
if short_version == 'latest':
- short_version = '1.6'
+ short_version = '3.6'
# Predicates ordered according to OpenShift Origin source:
# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
- if short_version == '1.1':
+ if short_version == '3.1':
predicates.extend([
{'name': 'PodFitsHostPorts'},
{'name': 'PodFitsResources'},
@@ -66,7 +66,7 @@ class LookupModule(LookupBase):
{'name': 'MatchNodeSelector'},
])
- if short_version == '1.2':
+ if short_version == '3.2':
predicates.extend([
{'name': 'PodFitsHostPorts'},
{'name': 'PodFitsResources'},
@@ -77,7 +77,7 @@ class LookupModule(LookupBase):
{'name': 'MaxGCEPDVolumeCount'}
])
- if short_version == '1.3':
+ if short_version == '3.3':
predicates.extend([
{'name': 'NoDiskConflict'},
{'name': 'NoVolumeZoneConflict'},
@@ -88,7 +88,7 @@ class LookupModule(LookupBase):
{'name': 'CheckNodeMemoryPressure'}
])
- if short_version == '1.4':
+ if short_version == '3.4':
predicates.extend([
{'name': 'NoDiskConflict'},
{'name': 'NoVolumeZoneConflict'},
@@ -101,7 +101,7 @@ class LookupModule(LookupBase):
{'name': 'MatchInterPodAffinity'}
])
- if short_version in ['1.5', '1.6']:
+ if short_version in ['3.5', '3.6']:
predicates.extend([
{'name': 'NoVolumeZoneConflict'},
{'name': 'MaxEBSVolumeCount'},
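
The normalization now runs in the opposite direction: origin short versions are mapped onto the enterprise numbering before the per-version predicate tables are consulted. The conversion itself is a one-line regex substitution (note the unescaped dot matches any character; a stricter pattern would be r'^1\.'):

    import re

    def to_enterprise_short_version(short_version):
        # origin 1.x -> enterprise 3.x; enterprise-style input passes through.
        return re.sub(r'^1.', '3.', short_version)

    assert to_enterprise_short_version('1.5') == '3.5'
    assert to_enterprise_short_version('3.6') == '3.6'
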
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
index 6ad40e748..66e6ecea3 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -41,7 +41,7 @@ class LookupModule(LookupBase):
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
@@ -49,21 +49,21 @@ class LookupModule(LookupBase):
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
- if deployment_type == 'openshift-enterprise':
+ if deployment_type == 'origin':
- # convert short_version to origin short_version
+ # convert short_version to enterprise short_version
- short_version = re.sub('^3.', '1.', short_version)
+ short_version = re.sub('^1.', '3.', short_version)
if short_version == 'latest':
- short_version = '1.6'
+ short_version = '3.6'
- if short_version == '1.1':
+ if short_version == '3.1':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'SelectorSpreadPriority', 'weight': 1}
])
- if short_version == '1.2':
+ if short_version == '3.2':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
@@ -71,7 +71,7 @@ class LookupModule(LookupBase):
{'name': 'NodeAffinityPriority', 'weight': 1}
])
- if short_version == '1.3':
+ if short_version == '3.3':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
@@ -80,7 +80,7 @@ class LookupModule(LookupBase):
{'name': 'TaintTolerationPriority', 'weight': 1}
])
- if short_version == '1.4':
+ if short_version == '3.4':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
@@ -91,7 +91,7 @@ class LookupModule(LookupBase):
{'name': 'InterPodAffinityPriority', 'weight': 1}
])
- if short_version in ['1.5', '1.6']:
+ if short_version in ['3.5', '3.6']:
priorities.extend([
{'name': 'SelectorSpreadPriority', 'weight': 1},
{'name': 'InterPodAffinityPriority', 'weight': 1},
diff --git a/roles/openshift_master_facts/test/conftest.py b/roles/openshift_master_facts/test/conftest.py
new file mode 100644
index 000000000..e67d24f04
--- /dev/null
+++ b/roles/openshift_master_facts/test/conftest.py
@@ -0,0 +1,54 @@
+import os
+import sys
+
+import pytest
+
+sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
+
+from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule # noqa: E402
+from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule # noqa: E402
+
+
+@pytest.fixture()
+def predicates_lookup():
+ return PredicatesLookupModule()
+
+
+@pytest.fixture()
+def priorities_lookup():
+ return PrioritiesLookupModule()
+
+
+@pytest.fixture()
+def facts(request):
+ return {
+ 'openshift': {
+ 'common': {}
+ }
+ }
+
+
+@pytest.fixture(params=[True, False])
+def regions_enabled(request):
+ return request.param
+
+
+@pytest.fixture(params=[True, False])
+def zones_enabled(request):
+ return request.param
+
+
+def v_prefix(release):
+ """Prefix a release number with 'v'."""
+ return "v" + release
+
+
+def minor(release):
+ """Add a suffix to release, making 'X.Y' become 'X.Y.Z'."""
+ return release + ".1"
+
+
+@pytest.fixture(params=[str, v_prefix, minor])
+def release_mod(request):
+ """Modifies a release string to alternative valid values."""
+ return request.param
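
These parametrized fixtures are what replace the old nose test generators: pytest builds the cross product of fixture parameters automatically. A hypothetical test module next to this conftest.py would fan out like so — two boolean fixtures and three release_mod callables yield 2 * 2 * 3 = 12 generated cases:

    def test_fanout(regions_enabled, zones_enabled, release_mod):
        release = release_mod("1.5")
        assert release in ("1.5", "v1.5", "1.5.1")
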
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py
new file mode 100644
index 000000000..e8da1e04a
--- /dev/null
+++ b/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py
@@ -0,0 +1,57 @@
+import copy
+import os
+import sys
+
+from ansible.errors import AnsibleError
+import pytest
+
+sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
+
+from openshift_master_facts_default_predicates import LookupModule # noqa: E402
+
+
+class TestOpenShiftMasterFactsBadInput(object):
+ lookup = LookupModule()
+ default_facts = {
+ 'openshift': {
+ 'common': {}
+ }
+ }
+
+ def test_missing_openshift_facts(self):
+ with pytest.raises(AnsibleError):
+ facts = {}
+ self.lookup.run(None, variables=facts)
+
+ def test_missing_deployment_type(self):
+ with pytest.raises(AnsibleError):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '10.10'
+ self.lookup.run(None, variables=facts)
+
+ def test_missing_short_version_and_missing_openshift_release(self):
+ with pytest.raises(AnsibleError):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['deployment_type'] = 'origin'
+ self.lookup.run(None, variables=facts)
+
+ def test_unknown_deployment_types(self):
+ with pytest.raises(AnsibleError):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '1.1'
+ facts['openshift']['common']['deployment_type'] = 'bogus'
+ self.lookup.run(None, variables=facts)
+
+ def test_unknown_origin_version(self):
+ with pytest.raises(AnsibleError):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '0.1'
+ facts['openshift']['common']['deployment_type'] = 'origin'
+ self.lookup.run(None, variables=facts)
+
+ def test_unknown_ocp_version(self):
+ with pytest.raises(AnsibleError):
+ facts = copy.deepcopy(self.default_facts)
+ facts['openshift']['common']['short_version'] = '0.1'
+ facts['openshift']['common']['deployment_type'] = 'openshift-enterprise'
+ self.lookup.run(None, variables=facts)
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index 68b6deb88..5a9e545a3 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -1,13 +1,5 @@
-import copy
-import os
-import sys
+import pytest
-from ansible.errors import AnsibleError
-from nose.tools import raises, assert_equal
-
-sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")] + sys.path
-
-from openshift_master_facts_default_predicates import LookupModule # noqa: E402
# Predicates ordered according to OpenShift Origin source:
# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
@@ -84,183 +76,88 @@ TEST_VARS = [
('1.5', 'origin', DEFAULT_PREDICATES_1_5),
('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
('1.6', 'origin', DEFAULT_PREDICATES_1_5),
+ ('3.6', 'origin', DEFAULT_PREDICATES_1_5),
('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
]
-class TestOpenShiftMasterFactsDefaultPredicates(object):
- def setUp(self):
- self.lookup = LookupModule()
- self.default_facts = {
- 'openshift': {
- 'common': {}
- }
- }
+def assert_ok(predicates_lookup, default_predicates, regions_enabled, **kwargs):
+ results = predicates_lookup.run(None, regions_enabled=regions_enabled, **kwargs)
+ if regions_enabled:
+ assert results == default_predicates + [REGION_PREDICATE]
+ else:
+ assert results == default_predicates
+
+
+def test_openshift_version(predicates_lookup, openshift_version_fixture, regions_enabled):
+ facts, default_predicates = openshift_version_fixture
+ assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled)
+
+
+@pytest.fixture(params=TEST_VARS)
+def openshift_version_fixture(request, facts):
+ version, deployment_type, default_predicates = request.param
+ version += '.1'
+ facts['openshift_version'] = version
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ return facts, default_predicates
+
+
+def test_openshift_release(predicates_lookup, openshift_release_fixture, regions_enabled):
+ facts, default_predicates = openshift_release_fixture
+ assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled)
+
+
+@pytest.fixture(params=TEST_VARS)
+def openshift_release_fixture(request, facts, release_mod):
+ release, deployment_type, default_predicates = request.param
+ facts['openshift_release'] = release_mod(release)
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ return facts, default_predicates
+
+
+def test_short_version(predicates_lookup, short_version_fixture, regions_enabled):
+ facts, default_predicates = short_version_fixture
+ assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled)
+
+
+@pytest.fixture(params=TEST_VARS)
+def short_version_fixture(request, facts):
+ short_version, deployment_type, default_predicates = request.param
+ facts['openshift']['common']['short_version'] = short_version
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ return facts, default_predicates
+
+
+def test_short_version_kwarg(predicates_lookup, short_version_kwarg_fixture, regions_enabled):
+ facts, short_version, default_predicates = short_version_kwarg_fixture
+ assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled, short_version=short_version)
+
+
+@pytest.fixture(params=TEST_VARS)
+def short_version_kwarg_fixture(request, facts):
+ short_version, deployment_type, default_predicates = request.param
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ return facts, short_version, default_predicates
+
+
+def test_deployment_type_kwarg(predicates_lookup, deployment_type_kwarg_fixture, regions_enabled):
+ facts, deployment_type, default_predicates = deployment_type_kwarg_fixture
+ assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled, deployment_type=deployment_type)
+
+
+@pytest.fixture(params=TEST_VARS)
+def deployment_type_kwarg_fixture(request, facts):
+ short_version, deployment_type, default_predicates = request.param
+ facts['openshift']['common']['short_version'] = short_version
+ return facts, deployment_type, default_predicates
+
+
+def test_short_version_deployment_type_kwargs(predicates_lookup, short_version_deployment_type_kwargs_fixture, regions_enabled):
+ short_version, deployment_type, default_predicates = short_version_deployment_type_kwargs_fixture
+ assert_ok(predicates_lookup, default_predicates, regions_enabled=regions_enabled, short_version=short_version, deployment_type=deployment_type)
+
- @raises(AnsibleError)
- def test_missing_short_version_and_missing_openshift_release(self):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['deployment_type'] = 'origin'
- self.lookup.run(None, variables=facts)
-
- def check_defaults_short_version(self, short_version, deployment_type, default_predicates,
- regions_enabled):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = short_version
- facts['openshift']['common']['deployment_type'] = deployment_type
- results = self.lookup.run(None, variables=facts,
- regions_enabled=regions_enabled)
- if regions_enabled:
- assert_equal(results, default_predicates + [REGION_PREDICATE])
- else:
- assert_equal(results, default_predicates)
-
- def check_defaults_short_version_kwarg(self, short_version, deployment_type, default_predicates,
- regions_enabled):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['deployment_type'] = deployment_type
- results = self.lookup.run(None, variables=facts,
- regions_enabled=regions_enabled,
- short_version=short_version)
- if regions_enabled:
- assert_equal(results, default_predicates + [REGION_PREDICATE])
- else:
- assert_equal(results, default_predicates)
-
- def check_defaults_deployment_type_kwarg(self, short_version, deployment_type,
- default_predicates, regions_enabled):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = short_version
- results = self.lookup.run(None, variables=facts,
- regions_enabled=regions_enabled,
- deployment_type=deployment_type)
- if regions_enabled:
- assert_equal(results, default_predicates + [REGION_PREDICATE])
- else:
- assert_equal(results, default_predicates)
-
- def check_defaults_only_kwargs(self, short_version, deployment_type,
- default_predicates, regions_enabled):
- facts = copy.deepcopy(self.default_facts)
- results = self.lookup.run(None, variables=facts,
- regions_enabled=regions_enabled,
- short_version=short_version,
- deployment_type=deployment_type)
- if regions_enabled:
- assert_equal(results, default_predicates + [REGION_PREDICATE])
- else:
- assert_equal(results, default_predicates)
-
- def check_defaults_release(self, release, deployment_type, default_predicates,
- regions_enabled):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift_release'] = release
- facts['openshift']['common']['deployment_type'] = deployment_type
- results = self.lookup.run(None, variables=facts,
- regions_enabled=regions_enabled)
- if regions_enabled:
- assert_equal(results, default_predicates + [REGION_PREDICATE])
- else:
- assert_equal(results, default_predicates)
-
- def check_defaults_version(self, version, deployment_type, default_predicates,
- regions_enabled):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift_version'] = version
- facts['openshift']['common']['deployment_type'] = deployment_type
- results = self.lookup.run(None, variables=facts,
- regions_enabled=regions_enabled)
- if regions_enabled:
- assert_equal(results, default_predicates + [REGION_PREDICATE])
- else:
- assert_equal(results, default_predicates)
-
- def check_defaults_override_vars(self, release, deployment_type,
- default_predicates, regions_enabled,
- extra_facts=None):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = release
- facts['openshift']['common']['deployment_type'] = deployment_type
- if extra_facts is not None:
- for fact in extra_facts:
- facts[fact] = extra_facts[fact]
- results = self.lookup.run(None, variables=facts,
- regions_enabled=regions_enabled,
- return_set_vars=False)
- if regions_enabled:
- assert_equal(results, default_predicates + [REGION_PREDICATE])
- else:
- assert_equal(results, default_predicates)
-
- def test_openshift_version(self):
- for regions_enabled in (True, False):
- for release, deployment_type, default_predicates in TEST_VARS:
- release = release + '.1'
- yield self.check_defaults_version, release, deployment_type, default_predicates, regions_enabled
-
- def test_v_release_defaults(self):
- for regions_enabled in (True, False):
- for release, deployment_type, default_predicates in TEST_VARS:
- yield self.check_defaults_release, 'v' + release, deployment_type, default_predicates, regions_enabled
-
- def test_release_defaults(self):
- for regions_enabled in (True, False):
- for release, deployment_type, default_predicates in TEST_VARS:
- yield self.check_defaults_release, release, deployment_type, default_predicates, regions_enabled
-
- def test_short_version_defaults(self):
- for regions_enabled in (True, False):
- for release, deployment_type, default_predicates in TEST_VARS:
- yield self.check_defaults_short_version, release, deployment_type, default_predicates, regions_enabled
-
- def test_short_version_kwarg(self):
- for regions_enabled in (True, False):
- for release, deployment_type, default_predicates in TEST_VARS:
- yield self.check_defaults_short_version_kwarg, release, deployment_type, default_predicates, regions_enabled
-
- def test_only_kwargs(self):
- for regions_enabled in (True, False):
- for release, deployment_type, default_predicates in TEST_VARS:
- yield self.check_defaults_only_kwargs, release, deployment_type, default_predicates, regions_enabled
-
- def test_deployment_type_kwarg(self):
- for regions_enabled in (True, False):
- for release, deployment_type, default_predicates in TEST_VARS:
- yield self.check_defaults_deployment_type_kwarg, release, deployment_type, default_predicates, regions_enabled
-
- def test_trunc_openshift_release(self):
- for release, deployment_type, default_predicates in TEST_VARS:
- release = release + '.1'
- yield self.check_defaults_release, release, deployment_type, default_predicates, False
-
- @raises(AnsibleError)
- def test_unknown_deployment_types(self):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = '1.1'
- facts['openshift']['common']['deployment_type'] = 'bogus'
- self.lookup.run(None, variables=facts)
-
- @raises(AnsibleError)
- def test_unknown_origin_version(self):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = '0.1'
- facts['openshift']['common']['deployment_type'] = 'origin'
- self.lookup.run(None, variables=facts)
-
- @raises(AnsibleError)
- def test_unknown_ocp_version(self):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = '0.1'
- facts['openshift']['common']['deployment_type'] = 'openshift-enterprise'
- self.lookup.run(None, variables=facts)
-
- @raises(AnsibleError)
- def test_missing_deployment_type(self):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = '10.10'
- self.lookup.run(None, variables=facts)
-
- @raises(AnsibleError)
- def testMissingOpenShiftFacts(self):
- facts = {}
- self.lookup.run(None, variables=facts)
+@pytest.fixture(params=TEST_VARS)
+def short_version_deployment_type_kwargs_fixture(request):
+ return request.param
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
index 4e44a2b3d..81d3ee19e 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
@@ -1,13 +1,5 @@
-import copy
-import os
-import sys
+import pytest
-from ansible.errors import AnsibleError
-from nose.tools import raises, assert_equal
-
-sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")] + sys.path
-
-from openshift_master_facts_default_priorities import LookupModule # noqa: E402
DEFAULT_PRIORITIES_1_1 = [
{'name': 'LeastRequestedPriority', 'weight': 1},
@@ -72,181 +64,88 @@ TEST_VARS = [
('1.5', 'origin', DEFAULT_PRIORITIES_1_5),
('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
('1.6', 'origin', DEFAULT_PRIORITIES_1_5),
+ ('3.6', 'origin', DEFAULT_PRIORITIES_1_5),
('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
]
-class TestOpenShiftMasterFactsDefaultPredicates(object):
- def setUp(self):
- self.lookup = LookupModule()
- self.default_facts = {
- 'openshift': {
- 'common': {}
- }
- }
+def assert_ok(priorities_lookup, default_priorities, zones_enabled, **kwargs):
+ results = priorities_lookup.run(None, zones_enabled=zones_enabled, **kwargs)
+ if zones_enabled:
+ assert results == default_priorities + [ZONE_PRIORITY]
+ else:
+ assert results == default_priorities
+
+
+def test_openshift_version(priorities_lookup, openshift_version_fixture, zones_enabled):
+ facts, default_priorities = openshift_version_fixture
+ assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled)
+
+
+@pytest.fixture(params=TEST_VARS)
+def openshift_version_fixture(request, facts):
+ version, deployment_type, default_priorities = request.param
+ version += '.1'
+ facts['openshift_version'] = version
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ return facts, default_priorities
+
+
+def test_openshift_release(priorities_lookup, openshift_release_fixture, zones_enabled):
+ facts, default_priorities = openshift_release_fixture
+ assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled)
+
+
+@pytest.fixture(params=TEST_VARS)
+def openshift_release_fixture(request, facts, release_mod):
+ release, deployment_type, default_priorities = request.param
+ facts['openshift_release'] = release_mod(release)
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ return facts, default_priorities
+
+
+def test_short_version(priorities_lookup, short_version_fixture, zones_enabled):
+ facts, default_priorities = short_version_fixture
+ assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled)
+
+
+@pytest.fixture(params=TEST_VARS)
+def short_version_fixture(request, facts):
+ short_version, deployment_type, default_priorities = request.param
+ facts['openshift']['common']['short_version'] = short_version
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ return facts, default_priorities
+
+
+def test_short_version_kwarg(priorities_lookup, short_version_kwarg_fixture, zones_enabled):
+ facts, short_version, default_priorities = short_version_kwarg_fixture
+ assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled, short_version=short_version)
+
+
+@pytest.fixture(params=TEST_VARS)
+def short_version_kwarg_fixture(request, facts):
+ short_version, deployment_type, default_priorities = request.param
+ facts['openshift']['common']['deployment_type'] = deployment_type
+ return facts, short_version, default_priorities
+
+
+def test_deployment_type_kwarg(priorities_lookup, deployment_type_kwarg_fixture, zones_enabled):
+ facts, deployment_type, default_priorities = deployment_type_kwarg_fixture
+ assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled, deployment_type=deployment_type)
+
+
+@pytest.fixture(params=TEST_VARS)
+def deployment_type_kwarg_fixture(request, facts):
+ short_version, deployment_type, default_priorities = request.param
+ facts['openshift']['common']['short_version'] = short_version
+ return facts, deployment_type, default_priorities
+
+
+def test_short_version_deployment_type_kwargs(priorities_lookup, short_version_deployment_type_kwargs_fixture, zones_enabled):
+ short_version, deployment_type, default_priorities = short_version_deployment_type_kwargs_fixture
+ assert_ok(priorities_lookup, default_priorities, zones_enabled=zones_enabled, short_version=short_version, deployment_type=deployment_type)
+
- @raises(AnsibleError)
- def test_missing_short_version_and_missing_openshift_release(self):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['deployment_type'] = 'origin'
- self.lookup.run(None, variables=facts)
-
- def check_defaults_short_version(self, release, deployment_type,
- default_priorities, zones_enabled):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = release
- facts['openshift']['common']['deployment_type'] = deployment_type
- results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled)
- if zones_enabled:
- assert_equal(results, default_priorities + [ZONE_PRIORITY])
- else:
- assert_equal(results, default_priorities)
-
- def check_defaults_short_version_kwarg(self, release, deployment_type,
- default_priorities, zones_enabled):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['deployment_type'] = deployment_type
- results = self.lookup.run(None, variables=facts,
- zones_enabled=zones_enabled,
- short_version=release)
- if zones_enabled:
- assert_equal(results, default_priorities + [ZONE_PRIORITY])
- else:
- assert_equal(results, default_priorities)
-
- def check_defaults_deployment_type_kwarg(self, release, deployment_type,
- default_priorities, zones_enabled):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = release
- results = self.lookup.run(None, variables=facts,
- zones_enabled=zones_enabled,
- deployment_type=deployment_type)
- if zones_enabled:
- assert_equal(results, default_priorities + [ZONE_PRIORITY])
- else:
- assert_equal(results, default_priorities)
-
- def check_defaults_only_kwargs(self, release, deployment_type,
- default_priorities, zones_enabled):
- facts = copy.deepcopy(self.default_facts)
- results = self.lookup.run(None, variables=facts,
- zones_enabled=zones_enabled,
- short_version=release,
- deployment_type=deployment_type)
- if zones_enabled:
- assert_equal(results, default_priorities + [ZONE_PRIORITY])
- else:
- assert_equal(results, default_priorities)
-
- def check_defaults_release(self, release, deployment_type, default_priorities,
- zones_enabled):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift_release'] = release
- facts['openshift']['common']['deployment_type'] = deployment_type
- results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled)
- if zones_enabled:
- assert_equal(results, default_priorities + [ZONE_PRIORITY])
- else:
- assert_equal(results, default_priorities)
-
- def check_defaults_version(self, release, deployment_type, default_priorities,
- zones_enabled):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift_version'] = release
- facts['openshift']['common']['deployment_type'] = deployment_type
- results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled)
- if zones_enabled:
- assert_equal(results, default_priorities + [ZONE_PRIORITY])
- else:
- assert_equal(results, default_priorities)
-
- def check_defaults_override_vars(self, release, deployment_type,
- default_priorities, zones_enabled,
- extra_facts=None):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = release
- facts['openshift']['common']['deployment_type'] = deployment_type
- if extra_facts is not None:
- for fact in extra_facts:
- facts[fact] = extra_facts[fact]
- results = self.lookup.run(None, variables=facts,
- zones_enabled=zones_enabled,
- return_set_vars=False)
- if zones_enabled:
- assert_equal(results, default_priorities + [ZONE_PRIORITY])
- else:
- assert_equal(results, default_priorities)
-
- def test_openshift_version(self):
- for zones_enabled in (True, False):
- for release, deployment_type, default_priorities in TEST_VARS:
- release = release + '.1'
- yield self.check_defaults_version, release, deployment_type, default_priorities, zones_enabled
-
- def test_v_release_defaults(self):
- for zones_enabled in (True, False):
- for release, deployment_type, default_priorities in TEST_VARS:
- release = 'v' + release
- yield self.check_defaults_release, release, deployment_type, default_priorities, zones_enabled
-
- def test_release_defaults(self):
- for zones_enabled in (True, False):
- for release, deployment_type, default_priorities in TEST_VARS:
- yield self.check_defaults_release, release, deployment_type, default_priorities, zones_enabled
-
- def test_short_version_defaults(self):
- for zones_enabled in (True, False):
- for short_version, deployment_type, default_priorities in TEST_VARS:
- yield self.check_defaults_short_version, short_version, deployment_type, default_priorities, zones_enabled
-
- def test_only_kwargs(self):
- for zones_enabled in (True, False):
- for short_version, deployment_type, default_priorities in TEST_VARS:
- yield self.check_defaults_only_kwargs, short_version, deployment_type, default_priorities, zones_enabled
-
- def test_deployment_type_kwarg(self):
- for zones_enabled in (True, False):
- for short_version, deployment_type, default_priorities in TEST_VARS:
- yield self.check_defaults_deployment_type_kwarg, short_version, deployment_type, default_priorities, zones_enabled
-
- def test_release_kwarg(self):
- for zones_enabled in (True, False):
- for short_version, deployment_type, default_priorities in TEST_VARS:
- yield self.check_defaults_short_version_kwarg, short_version, deployment_type, default_priorities, zones_enabled
-
- def test_trunc_openshift_release(self):
- for release, deployment_type, default_priorities in TEST_VARS:
- release = release + '.1'
- yield self.check_defaults_release, release, deployment_type, default_priorities, False
-
- @raises(AnsibleError)
- def test_unknown_origin_version(self):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = '0.1'
- facts['openshift']['common']['deployment_type'] = 'origin'
- self.lookup.run(None, variables=facts)
-
- @raises(AnsibleError)
- def test_unknown_ocp_version(self):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = '0.1'
- facts['openshift']['common']['deployment_type'] = 'openshift-enterprise'
- self.lookup.run(None, variables=facts)
-
- @raises(AnsibleError)
- def test_unknown_deployment_types(self):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = '1.1'
- facts['openshift']['common']['deployment_type'] = 'bogus'
- self.lookup.run(None, variables=facts)
-
- @raises(AnsibleError)
- def test_missing_deployment_type(self):
- facts = copy.deepcopy(self.default_facts)
- facts['openshift']['common']['short_version'] = '10.10'
- self.lookup.run(None, variables=facts)
-
- @raises(AnsibleError)
- def test_missing_openshift_facts(self):
- facts = {}
- self.lookup.run(None, variables=facts)
+@pytest.fixture(params=TEST_VARS)
+def short_version_deployment_type_kwargs_fixture(request):
+ return request.param
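
The rewritten tests above lean on fixtures that are not defined in this file -- facts, priorities_lookup, zones_enabled, and release_mod -- so a shared conftest.py is implied elsewhere in this change. A minimal sketch of what such a conftest could provide; the fixture names match the tests above, but the bodies are assumptions inferred from how the tests use them, not code from this diff:

    # conftest.py -- hypothetical sketch; bodies are assumptions.
    import copy
    import pytest

    @pytest.fixture()
    def facts():
        # Fresh copy per test so fixtures can mutate nested dicts safely.
        return copy.deepcopy({'openshift': {'common': {}}})

    @pytest.fixture()
    def priorities_lookup():
        # Assumes the lookup-plugin directory is importable, e.g. via the
        # sys.path manipulation the deleted header used to do.
        from openshift_master_facts_default_priorities import LookupModule
        return LookupModule()

    @pytest.fixture(params=[True, False])
    def zones_enabled(request):
        return request.param

    @pytest.fixture(params=[str, lambda release: 'v' + release])
    def release_mod(request):
        # Covers both the '1.5' and 'v1.5' spellings of openshift_release.
        return request.param

With parametrized fixtures, pytest generates the full cross-product of TEST_VARS entries and zone settings that the removed nose generators used to yield by hand.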
diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh
index f4315ef34..c8d5bb3d2 100755
--- a/roles/openshift_metrics/files/import_jks_certs.sh
+++ b/roles/openshift_metrics/files/import_jks_certs.sh
@@ -24,11 +24,10 @@ function import_certs() {
hawkular_cassandra_keystore_password=$(echo $CASSANDRA_KEYSTORE_PASSWD | base64 -d)
hawkular_metrics_truststore_password=$(echo $METRICS_TRUSTSTORE_PASSWD | base64 -d)
hawkular_cassandra_truststore_password=$(echo $CASSANDRA_TRUSTSTORE_PASSWD | base64 -d)
- hawkular_jgroups_password=$(echo $JGROUPS_PASSWD | base64 -d)
-
+
cassandra_alias=`keytool -noprompt -list -keystore $dir/hawkular-cassandra.truststore -storepass ${hawkular_cassandra_truststore_password} | sed -n '7~2s/,.*$//p'`
hawkular_alias=`keytool -noprompt -list -keystore $dir/hawkular-metrics.truststore -storepass ${hawkular_metrics_truststore_password} | sed -n '7~2s/,.*$//p'`
-
+
if [ ! -f $dir/hawkular-metrics.keystore ]; then
echo "Creating the Hawkular Metrics keystore from the PEM file"
keytool -importkeystore -v \
@@ -50,7 +49,7 @@ function import_certs() {
-srcstorepass $hawkular_cassandra_keystore_password \
-deststorepass $hawkular_cassandra_keystore_password
fi
-
+
if [[ ! ${cassandra_alias[*]} =~ hawkular-metrics ]]; then
echo "Importing the Hawkular Certificate into the Cassandra Truststore"
keytool -noprompt -import -v -trustcacerts -alias hawkular-metrics \
@@ -59,7 +58,7 @@ function import_certs() {
-trustcacerts \
-storepass $hawkular_cassandra_truststore_password
fi
-
+
if [[ ! ${hawkular_alias[*]} =~ hawkular-cassandra ]]; then
echo "Importing the Cassandra Certificate into the Hawkular Truststore"
keytool -noprompt -import -v -trustcacerts -alias hawkular-cassandra \
@@ -101,16 +100,6 @@ function import_certs() {
-storepass $hawkular_metrics_truststore_password
fi
done
-
- if [ ! -f $dir/hawkular-jgroups.keystore ]; then
- echo "Generating the jgroups keystore"
- keytool -genseckey -alias hawkular -keypass ${hawkular_jgroups_password} \
- -storepass ${hawkular_jgroups_password} \
- -keyalg Blowfish \
- -keysize 56 \
- -keystore $dir/hawkular-jgroups.keystore \
- -storetype JCEKS
- fi
}
import_certs
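
The alias lookups kept above depend on GNU sed addressing: sed -n '7~2s/,.*$//p' prints every second line starting at line 7 of the keytool -list output (a six-line header, then alternating "alias, date, entry type" and fingerprint lines) and trims everything after the first comma. The same parsing, restated as a Python sketch for readability (not part of this change):

    # Re-implementation of the sed pipeline above; assumes default
    # `keytool -list` formatting: 6 header lines, then alternating
    # "alias, created, entry-type" and fingerprint lines.
    import subprocess

    def truststore_aliases(keystore, storepass):
        out = subprocess.check_output(
            ['keytool', '-noprompt', '-list', '-keystore', keystore,
             '-storepass', storepass], text=True)
        return [line.split(',', 1)[0] for line in out.splitlines()[6::2]]

The [[ ! ${cassandra_alias[*]} =~ hawkular-metrics ]] guards then make the imports idempotent across re-runs.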
diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml
index f7cba0093..7af3f9467 100644
--- a/roles/openshift_metrics/tasks/generate_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_certificates.yaml
@@ -6,6 +6,6 @@
--key='{{ mktemp.stdout }}/ca.key'
--cert='{{ mktemp.stdout }}/ca.crt'
--serial='{{ mktemp.stdout }}/ca.serial.txt'
- --name="metrics-signer@$(date +%s)"
+ --name="metrics-signer@{{lookup('pipe','date +%s')}}"
- include: generate_hawkular_certificates.yaml
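
The --name fix above matters presumably because the task does not run through a shell, so the literal $(date +%s) was never expanded; lookup('pipe', ...) evaluates at template time on the control host instead. Roughly, as a behavioral sketch rather than Ansible's implementation:

    # What lookup('pipe', 'date +%s') reduces to when the task is templated:
    import subprocess
    signer_name = 'metrics-signer@' + subprocess.check_output(
        'date +%s', shell=True, text=True).strip()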
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 854697abb..61a240a33 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -3,7 +3,7 @@
include: setup_certificate.yaml
vars:
component: hawkular-metrics
- hostnames: "hawkular-metrics,{{ openshift_metrics_hawkular_hostname }}"
+ hostnames: "hawkular-metrics,hawkular-metrics.{{ openshift_metrics_project }}.svc.cluster.local,{{ openshift_metrics_hawkular_hostname }}"
changed_when: no
- name: generate hawkular-cassandra certificates
@@ -24,7 +24,6 @@
with_items:
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
- - hawkular-jgroups-keystore.pwd
changed_when: no
- set_fact:
@@ -32,11 +31,10 @@
with_items: "{{pwd_file_stat.results}}"
changed_when: no
-- name: generate password for hawkular metrics and jgroups
+- name: generate password for hawkular metrics
local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
with_items:
- hawkular-metrics
- - hawkular-jgroups-keystore
- name: generate htpasswd file for hawkular metrics
local_action: >
@@ -51,7 +49,6 @@
with_items:
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
- - hawkular-jgroups-keystore.pwd
- include: import_jks_certs.yaml
@@ -69,8 +66,6 @@
- hawkular-metrics-truststore.pwd
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
- - hawkular-jgroups.keystore
- - hawkular-jgroups-keystore.pwd
- hawkular-cassandra.crt
- hawkular-cassandra.pem
- hawkular-cassandra.keystore
@@ -104,11 +99,6 @@
hawkular-metrics.keystore.alias: "{{ 'hawkular-metrics'|b64encode }}"
hawkular-metrics.htpasswd.file: >
{{ hawkular_secrets['hawkular-metrics.htpasswd'] }}
- hawkular-metrics.jgroups.keystore: >
- {{ hawkular_secrets['hawkular-jgroups.keystore'] }}
- hawkular-metrics.jgroups.keystore.password: >
- {{ hawkular_secrets['hawkular-jgroups-keystore.pwd'] }}
- hawkular-metrics.jgroups.alias: "{{ 'hawkular'|b64encode }}"
when: name not in metrics_secrets.stdout_lines
changed_when: no
diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml
index 57ec70c79..2a67dad0e 100644
--- a/roles/openshift_metrics/tasks/import_jks_certs.yaml
+++ b/roles/openshift_metrics/tasks/import_jks_certs.yaml
@@ -15,10 +15,6 @@
register: metrics_truststore
check_mode: no
-- stat: path="{{mktemp.stdout}}/hawkular-jgroups.keystore"
- register: jgroups_keystore
- check_mode: no
-
- block:
- slurp: src={{ mktemp.stdout }}/hawkular-metrics-keystore.pwd
register: metrics_keystore_password
@@ -26,9 +22,6 @@
- slurp: src={{ mktemp.stdout }}/hawkular-cassandra-keystore.pwd
register: cassandra_keystore_password
- - slurp: src={{ mktemp.stdout }}/hawkular-jgroups-keystore.pwd
- register: jgroups_keystore_password
-
- fetch:
dest: "{{local_tmp.stdout}}/"
src: "{{ mktemp.stdout }}/{{item}}"
@@ -48,7 +41,6 @@
CASSANDRA_KEYSTORE_PASSWD: "{{cassandra_keystore_password.content}}"
METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}"
CASSANDRA_TRUSTSTORE_PASSWD: "{{cassandra_truststore_password.content}}"
- JGROUPS_PASSWD: "{{jgroups_keystore_password.content}}"
changed_when: False
- copy:
@@ -59,5 +51,4 @@
when: not metrics_keystore.stat.exists or
not metrics_truststore.stat.exists or
not cassandra_keystore.stat.exists or
- not cassandra_truststore.stat.exists or
- not jgroups_keystore.stat.exists
+ not cassandra_truststore.stat.exists
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index d39f1b43a..361378df3 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -58,9 +58,6 @@ spec:
- "--hmw.truststore=/secrets/hawkular-metrics.truststore"
- "--hmw.keystore_password_file=/secrets/hawkular-metrics.keystore.password"
- "--hmw.truststore_password_file=/secrets/hawkular-metrics.truststore.password"
- - "--hmw.jgroups_keystore=/secrets/hawkular-metrics.jgroups.keystore"
- - "--hmw.jgroups_keystore_password_file=/secrets/hawkular-metrics.jgroups.keystore.password"
- - "--hmw.jgroups_alias_file=/secrets/hawkular-metrics.jgroups.alias"
env:
- name: POD_NAMESPACE
valueFrom:
@@ -68,6 +65,8 @@ spec:
fieldPath: metadata.namespace
- name: MASTER_URL
value: "{{ openshift_metrics_master_url }}"
+ - name: JGROUPS_PASSWORD
+ value: "{{ 17 | oo_random_word }}"
- name: OPENSHIFT_KUBE_PING_NAMESPACE
valueFrom:
fieldRef:
@@ -81,10 +80,10 @@ spec:
mountPath: "/secrets"
- name: hawkular-metrics-client-secrets
mountPath: "/client-secrets"
-{% if ((openshift_metrics_hawkular_limits_cpu is defined and openshift_metrics_hawkular_limits_cpu is not none)
+{% if ((openshift_metrics_hawkular_limits_cpu is defined and openshift_metrics_hawkular_limits_cpu is not none)
or (openshift_metrics_hawkular_limits_memory is defined and openshift_metrics_hawkular_limits_memory is not none)
or (openshift_metrics_hawkular_requests_cpu is defined and openshift_metrics_hawkular_requests_cpu is not none)
- or (openshift_metrics_hawkular_requests_memory is defined and openshift_metrics_hawkular_requests_memory is not none))
+ or (openshift_metrics_hawkular_requests_memory is defined and openshift_metrics_hawkular_requests_memory is not none))
%}
resources:
{% if (openshift_metrics_hawkular_limits_cpu is not none
@@ -98,8 +97,8 @@ spec:
memory: "{{openshift_metrics_hawkular_limits_memory}}"
{% endif %}
{% endif %}
-{% if (openshift_metrics_hawkular_requests_cpu is not none
- or openshift_metrics_hawkular_requests_memory is not none)
+{% if (openshift_metrics_hawkular_requests_cpu is not none
+ or openshift_metrics_hawkular_requests_memory is not none)
%}
requests:
{% if openshift_metrics_hawkular_requests_cpu is not none %}
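
Instead of a pre-generated keystore secret, the JGroups password is now minted inline with the repo's oo_random_word filter. A sketch of what a filter with that signature typically looks like; the actual alphabet and implementation live in the repo's filter plugins and are assumptions here:

    # Sketch of an oo_random_word-style Jinja filter, as used in
    # "{{ 17 | oo_random_word }}" above; the alphabet is an assumption.
    import random
    import string

    def oo_random_word(length, source=string.ascii_letters + string.digits):
        return ''.join(random.choice(source) for _ in range(length))

Because the value is rendered once into the replication controller template, every replica stamped from that template shares the same password.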
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 3ae5c7600..abe139418 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,36 +1,16 @@
---
+- name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
- name: Pre-pull node system container image
command: >
atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
-- name: Check Node system container package
- command: >
- atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-node
- register: result
-
-- name: Update Node system container package
- command: >
- atomic containers update {{ openshift.common.service_type }}-node
- register: update_result
- changed_when: "'Extracting' in update_result.stdout"
- when:
- - l_is_same_version
- - ("node" in result.stdout)
-
-- name: Uninstall Node system container package
- command: >
- atomic uninstall {{ openshift.common.service_type }}-node
- failed_when: False
- when:
- - not l_is_same_version
- - ("node" in result.stdout)
-
-- name: Install Node system container package
- command: >
- atomic install --system --name={{ openshift.common.service_type }}-node {{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
- register: install_node_result
- changed_when: "'Extracting' in pull_result.stdout"
- when:
- - not l_is_same_version or ("node" not in result.stdout) | bool
+- name: Install or Update node system container
+ oc_atomic_container:
+ name: "{{ openshift.common.service_type }}-node"
+ image: "{{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
+ state: latest
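
The four imperative tasks are replaced by one call to the new oc_atomic_container module added earlier in this change. A hypothetical sketch of the branch it takes for state=latest, using only the atomic CLI invocations visible in the removed tasks:

    # Sketch of the install-or-update decision; the real module also
    # reports changed/unchanged status and handles uninstall states.
    import subprocess

    def ensure_latest(name, image):
        listing = subprocess.check_output(
            ['atomic', 'containers', 'list', '--no-trunc', '-a',
             '-f', 'container=' + name], text=True)
        if name not in listing:
            subprocess.check_call(
                ['atomic', 'install', '--system', '--name=' + name, image])
        else:
            # Already present: update in place instead of uninstall/reinstall.
            subprocess.check_call(['atomic', 'containers', 'update', name])

This also retires the fragile l_is_same_version comparison (the vars file deleted later in this diff) in favor of letting atomic decide whether an update is needed.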
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index 6114230d0..b76ce8797 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -1,36 +1,16 @@
---
+- name: Load lib_openshift modules
+ include_role:
+ name: lib_openshift
+
- name: Pre-pull OpenVSwitch system container image
command: >
atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
-- name: Check OpenvSwitch system container package
- command: >
- atomic containers list --no-trunc -a -f container=openvswitch
- register: result
-
-- name: Update OpenvSwitch system container package
- command: >
- atomic containers update openvswitch
- register: update_result
- changed_when: "'Extracting' in update_result.stdout"
- when:
- - l_is_same_version
- - ("openvswitch" in result.stdout) | bool
-
-- name: Uninstall OpenvSwitch system container package
- command: >
- atomic uninstall openvswitch
- failed_when: False
- when:
- - not l_is_same_version
- - ("openvswitch" in result.stdout) | bool
-
-- name: Install OpenvSwitch system container package
- command: >
- atomic install --system --name=openvswitch {{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
- when:
- - not l_is_same_version or ("openvswitch" not in result.stdout) | bool
- notify:
- - restart docker
+- name: Install or Update OpenVSwitch system container
+ oc_atomic_container:
+ name: openvswitch
+ image: "{{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
+ state: latest
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
deleted file mode 100644
index 0c2abf3b9..000000000
--- a/roles/openshift_node/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-l_is_same_version: "{{ (openshift.common.version is defined) and (openshift.common.version == openshift_version) | bool }}"
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index e21bee412..66bce38ec 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -82,7 +82,7 @@ Including an example of how to use your role (for instance, with variables passe
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
roles:
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index 609ca2a6e..2f79931df 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -13,7 +13,10 @@
vars:
# We will restart Docker ourselves after everything is ready:
skip_docker_restart: True
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+ - not openshift.common.is_containerized | bool
- include: "{{ node_config_hook }}"
when: node_config_hook is defined
@@ -25,14 +28,19 @@
when: not openshift.common.is_containerized | bool
- name: Remove obsolete docker-sdn-ovs.conf
- file: path=/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf state=absent
- when: (deployment_type == 'openshift-enterprise' and openshift_release | version_compare('3.4', '>=')) or (deployment_type == 'origin' and openshift_release | version_compare('1.4', '>='))
+ file:
+ path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
+ state: absent
+ when: (deployment_type == 'openshift-enterprise' and openshift_release | version_compare('3.4', '>='))
+ or (deployment_type == 'origin' and openshift_release | version_compare('1.4', '>='))
- include: containerized_node_upgrade.yml
when: openshift.common.is_containerized | bool
- name: Ensure containerized services stopped before Docker restart
- service: name={{ item }} state=stopped
+ service:
+ name: "{{ item }}"
+ state: stopped
with_items:
- etcd_container
- openvswitch
@@ -62,22 +70,19 @@
- include: docker/restart.yml
- name: Restart rpm node service
- service: name="{{ openshift.common.service_type }}-node" state=restarted
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
when: not openshift.common.is_containerized | bool
- name: Wait for node to be ready
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} --no-headers
+ oc_obj:
+ state: list
+ kind: node
+ name: "{{ openshift.common.hostname | lower }}"
register: node_output
delegate_to: "{{ groups.oo_first_master.0 }}"
- until: "{{ node_output.stdout.split()[1].startswith('Ready')}}"
- # Give the node two minutes to come back online. Note that we pre-pull images now
- # so containerized services should restart quickly as well.
+ until: node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
+ # Give the node two minutes to come back online.
retries: 24
delay: 5
- # AUDIT:changed_when: `false` because we are only inspecting the
- # state of the node, we aren't changing anything (we changed node
- # service state in the previous task). You could say we shouldn't
- # override this because something will be changing (the state of a
- # service), but that should be part of the last task.
- changed_when: false
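
The readiness wait now inspects the structured node object returned by oc_obj instead of parsing oc get columns. The Jinja until: condition, restated in Python over the same structure (field names taken from the expression itself):

    # Equivalent of: results.results[0].status.conditions
    #   | selectattr('type', 'match', '^Ready$')
    #   | map(attribute='status') | join | bool
    def node_is_ready(node_output):
        conditions = node_output['results']['results'][0]['status']['conditions']
        statuses = [c['status'] for c in conditions if c['type'] == 'Ready']
        # Jinja's bool filter treats strings like 'True'/'yes'/'1' as truthy.
        return ''.join(statuses).lower() in ('true', 'yes', '1')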
diff --git a/roles/openshift_repos/meta/main.yml b/roles/openshift_repos/meta/main.yml
index 0558b822c..cc18c453c 100644
--- a/roles/openshift_repos/meta/main.yml
+++ b/roles/openshift_repos/meta/main.yml
@@ -11,5 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- { role: openshift_facts }
+dependencies: []
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 23dcd0440..ffb760bfe 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -1,50 +1,47 @@
---
-# TODO: Add flag for enabling EPEL repo, default to false
-
-# TODO: Add subscription-management config, with parameters
-# for username, password, poolid(name), and official repos to
-# enable/disable. Might need to make a module that extends the
-# subscription management module to take a poolid and enable/disable the
-# proper repos correctly.
+- name: openshift_repos detect ostree
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
- assert:
that: openshift_deployment_type in known_openshift_deployment_types
- when: not openshift.common.is_containerized | bool
+ msg: "openshift_deployment_type must be one of {{ known_openshift_deployment_types }}"
-- name: Ensure libselinux-python is installed
- package: name=libselinux-python state=present
- when: not openshift.common.is_containerized | bool
+- block:
+ - name: Ensure libselinux-python is installed
+ package: name=libselinux-python state=present
-- name: Create any additional repos that are defined
- template:
- src: yum_repo.j2
- dest: /etc/yum.repos.d/openshift_additional.repo
- when: openshift_additional_repos | length > 0 and not openshift.common.is_containerized | bool
- notify: refresh cache
+ - name: Create any additional repos that are defined
+ template:
+ src: yum_repo.j2
+ dest: /etc/yum.repos.d/openshift_additional.repo
+ when:
+ - openshift_additional_repos | length > 0
+ notify: refresh cache
-- name: Remove the additional repos if no longer defined
- file:
- dest: /etc/yum.repos.d/openshift_additional.repo
- state: absent
- when: openshift_additional_repos | length == 0 and not openshift.common.is_containerized | bool
- notify: refresh cache
+ - name: Remove the additional repos if no longer defined
+ file:
+ dest: /etc/yum.repos.d/openshift_additional.repo
+ state: absent
+ when:
+ - openshift_additional_repos | length == 0
+ notify: refresh cache
-- name: Configure origin gpg keys if needed
- copy:
- src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
- dest: /etc/pki/rpm-gpg/
- notify: refresh cache
- when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
- and openshift_deployment_type == 'origin'
- and not openshift.common.is_containerized | bool
- and openshift_enable_origin_repo | default(true) | bool
+ - name: Configure origin gpg keys if needed
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ with_items:
+ - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
+ dest: /etc/pki/rpm-gpg/
+ - src: origin/repos/openshift-ansible-centos-paas-sig.repo
+ dest: /etc/yum.repos.d/
+ notify: refresh cache
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution != "Fedora"
+ - openshift_deployment_type == 'origin'
+ - openshift_enable_origin_repo | default(true) | bool
-- name: Configure origin yum repositories RHEL/CentOS
- copy:
- src: origin/repos/openshift-ansible-centos-paas-sig.repo
- dest: /etc/yum.repos.d/
- notify: refresh cache
- when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
- and openshift_deployment_type == 'origin'
- and not openshift.common.is_containerized | bool
- and openshift_enable_origin_repo | default(true) | bool
+ when: not ostree_booted.stat.exists
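
The new gate replaces the per-task not openshift.common.is_containerized checks with a single host-level fact: on OSTree-based hosts (Atomic Host) the package and repo tasks are skipped entirely. The detection itself reduces to a marker-file test, roughly:

    # rpm-ostree systems create /run/ostree-booted at boot; yum repo
    # management is skipped there because the OS tree is image-based.
    import os

    def is_ostree_host():
        return os.path.exists('/run/ostree-booted')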